diff --git a/.gcloudignore b/.gcloudignore
index 2c51591c28..564e27bc6c 100644
--- a/.gcloudignore
+++ b/.gcloudignore
@@ -1 +1 @@
-!notely
+!notely
diff --git a/.gitignore b/.gitignore
index 2092f54e78..33a226f651 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,4 @@
-out
-.env
-learn-cicd-starter
-notely
+out
+.env
+learn-cicd-starter
+notely
diff --git a/Dockerfile b/Dockerfile
index 2be3d18b81..3f72f0f3ac 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,7 +1,7 @@
-FROM --platform=linux/amd64 debian:stable-slim
-
-RUN apt-get update && apt-get install -y ca-certificates
-
-ADD notely /usr/bin/notely
-
-CMD ["notely"]
+FROM --platform=linux/amd64 debian:stable-slim
+
+RUN apt-get update && apt-get install -y ca-certificates
+
+ADD notely /usr/bin/notely
+
+CMD ["notely"]
diff --git a/README.md b/README.md
index c2bec0368b..c153e7097d 100644
--- a/README.md
+++ b/README.md
@@ -1,23 +1,39 @@
-# learn-cicd-starter (Notely)
-
-This repo contains the starter code for the "Notely" application for the "Learn CICD" course on [Boot.dev](https://boot.dev).
-
-## Local Development
-
-Make sure you're on Go version 1.22+.
-
-Create a `.env` file in the root of the project with the following contents:
-
-```bash
-PORT="8080"
-```
-
-Run the server:
-
-```bash
-go build -o notely && ./notely
-```
-
-*This starts the server in non-database mode.* It will serve a simple webpage at `http://localhost:8080`.
-
-You do *not* need to set up a database or any interactivity on the webpage yet. Instructions for that will come later in the course!
+# learn-cicd-starter (Notely)
+
+This repo contains the starter code for the "Notely" application for the "Learn CICD" course on [Boot.dev](https://boot.dev).
+
+## Local Development
+
+Make sure you're on Go version 1.22+.
+
+Create a `.env` file in the root of the project with the following contents:
+
+```bash
+PORT="8080"
+```
+
+Run the server:
+
+```bash
+go build -o notely && ./notely
+```
+
+*This starts the server in non-database mode.* It will serve a simple webpage at `http://localhost:8080`.
+
+You do *not* need to set up a database or any interactivity on the webpage yet. Instructions for that will come later in the course!
+
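+Once the server is running, a quick sanity check is the readiness endpoint:
+
+```bash
+curl http://localhost:8080/v1/healthz
+```
+
+This should respond with `{"status":"ok"}`.
+
+When a database is configured, the other `/v1` endpoints expect an API key in the `Authorization` header (the key below is just a placeholder):
+
+```bash
+curl -H "Authorization: ApiKey YOUR_API_KEY" http://localhost:8080/v1/notes
+```
+
+Mefju's version of Boot.dev's Notely app.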
\ No newline at end of file diff --git a/go.mod b/go.mod index 09e04f9c12..4607d8cc20 100644 --- a/go.mod +++ b/go.mod @@ -1,19 +1,19 @@ -module github.com/bootdotdev/learn-cicd-starter - -go 1.22 - -require ( - github.com/go-chi/chi v1.5.4 - github.com/go-chi/cors v1.2.1 - github.com/google/uuid v1.3.0 - github.com/joho/godotenv v1.5.1 - github.com/tursodatabase/libsql-client-go v0.0.0-20240220085343-4ae0eb9d0898 -) - -require ( - github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 // indirect - github.com/klauspost/compress v1.15.15 // indirect - github.com/libsql/sqlite-antlr4-parser v0.0.0-20230802215326-5cb5bb604475 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - nhooyr.io/websocket v1.8.7 // indirect -) +module github.com/bootdotdev/learn-cicd-starter + +go 1.22 + +require ( + github.com/go-chi/chi v1.5.4 + github.com/go-chi/cors v1.2.1 + github.com/google/uuid v1.3.0 + github.com/joho/godotenv v1.5.1 + github.com/tursodatabase/libsql-client-go v0.0.0-20240220085343-4ae0eb9d0898 +) + +require ( + github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 // indirect + github.com/klauspost/compress v1.15.15 // indirect + github.com/libsql/sqlite-antlr4-parser v0.0.0-20230802215326-5cb5bb604475 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + nhooyr.io/websocket v1.8.7 // indirect +) diff --git a/go.sum b/go.sum index e3c6163bd0..d4315d9eb8 100644 --- a/go.sum +++ b/go.sum @@ -1,77 +1,77 @@ -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 h1:goHVqTbFX3AIo0tzGr14pgfAW2ZfPChKO21Z9MGf/gk= -github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= -github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/go-chi/chi v1.5.4 h1:QHdzF2szwjqVV4wmByUnTcsbIg7UGaQ0tPF2t5GcAIs= -github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg= -github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= -github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= -github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod 
h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= -github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/libsql/sqlite-antlr4-parser v0.0.0-20230802215326-5cb5bb604475 h1:6PfEMwfInASh9hkN83aR0j4W/eKaAZt/AURtXAXlas0= -github.com/libsql/sqlite-antlr4-parser v0.0.0-20230802215326-5cb5bb604475/go.mod h1:20nXSmcf0nAscrzqsXeC2/tA3KkV2eCiJqYuyAgl+ss= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/tursodatabase/libsql-client-go v0.0.0-20240220085343-4ae0eb9d0898 h1:1MvEhzI5pvP27e9Dzz861mxk9WzXZLSJwzOU67cKTbU= -github.com/tursodatabase/libsql-client-go v0.0.0-20240220085343-4ae0eb9d0898/go.mod h1:9bKuHS7eZh/0mJndbUOrCx8Ej3PlsRDszj4L7oVYMPQ= 
-github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 h1:goHVqTbFX3AIo0tzGr14pgfAW2ZfPChKO21Z9MGf/gk= +github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/go-chi/chi v1.5.4 h1:QHdzF2szwjqVV4wmByUnTcsbIg7UGaQ0tPF2t5GcAIs= +github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg= +github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= +github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= 
+github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/libsql/sqlite-antlr4-parser v0.0.0-20230802215326-5cb5bb604475 h1:6PfEMwfInASh9hkN83aR0j4W/eKaAZt/AURtXAXlas0= +github.com/libsql/sqlite-antlr4-parser v0.0.0-20230802215326-5cb5bb604475/go.mod h1:20nXSmcf0nAscrzqsXeC2/tA3KkV2eCiJqYuyAgl+ss= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/tursodatabase/libsql-client-go v0.0.0-20240220085343-4ae0eb9d0898 
h1:1MvEhzI5pvP27e9Dzz861mxk9WzXZLSJwzOU67cKTbU= +github.com/tursodatabase/libsql-client-go v0.0.0-20240220085343-4ae0eb9d0898/go.mod h1:9bKuHS7eZh/0mJndbUOrCx8Ej3PlsRDszj4L7oVYMPQ= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= diff --git a/handler_notes.go b/handler_notes.go index 85a8e3415d..c1dd72fff6 100644 --- a/handler_notes.go +++ b/handler_notes.go @@ -1,66 +1,66 @@ -package main - -import ( - "encoding/json" - "net/http" - "time" - - "github.com/bootdotdev/learn-cicd-starter/internal/database" - "github.com/google/uuid" -) - -func (cfg *apiConfig) handlerNotesGet(w http.ResponseWriter, r *http.Request, user database.User) { - posts, err := cfg.DB.GetNotesForUser(r.Context(), user.ID) - if err != nil { - respondWithError(w, http.StatusInternalServerError, "Couldn't get posts for user", err) - return - } - - postsResp, err := databasePostsToPosts(posts) - if err != nil { - respondWithError(w, http.StatusInternalServerError, "Couldn't convert posts", err) - return - } - - respondWithJSON(w, http.StatusOK, postsResp) -} - -func (cfg *apiConfig) handlerNotesCreate(w http.ResponseWriter, r *http.Request, user database.User) { - type parameters struct { - Note string `json:"note"` - } - decoder := json.NewDecoder(r.Body) - params := parameters{} - err := decoder.Decode(¶ms) - if err != nil { - respondWithError(w, http.StatusInternalServerError, "Couldn't decode parameters", err) - return - } - - id := uuid.New().String() - err = cfg.DB.CreateNote(r.Context(), database.CreateNoteParams{ - ID: id, - CreatedAt: time.Now().UTC().Format(time.RFC3339), - UpdatedAt: time.Now().UTC().Format(time.RFC3339), - Note: params.Note, - UserID: user.ID, - }) - if err != nil { - respondWithError(w, http.StatusInternalServerError, "Couldn't create note", err) - return - } - - note, err := cfg.DB.GetNote(r.Context(), id) - if err != nil { - respondWithError(w, 
http.StatusNotFound, "Couldn't get note", err) - return - } - - noteResp, err := databaseNoteToNote(note) - if err != nil { - respondWithError(w, http.StatusInternalServerError, "Couldn't convert note", err) - return - } - - respondWithJSON(w, http.StatusCreated, noteResp) -} +package main + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/bootdotdev/learn-cicd-starter/internal/database" + "github.com/google/uuid" +) + +func (cfg *apiConfig) handlerNotesGet(w http.ResponseWriter, r *http.Request, user database.User) { + posts, err := cfg.DB.GetNotesForUser(r.Context(), user.ID) + if err != nil { + respondWithError(w, http.StatusInternalServerError, "Couldn't get posts for user", err) + return + } + + postsResp, err := databasePostsToPosts(posts) + if err != nil { + respondWithError(w, http.StatusInternalServerError, "Couldn't convert posts", err) + return + } + + respondWithJSON(w, http.StatusOK, postsResp) +} + +func (cfg *apiConfig) handlerNotesCreate(w http.ResponseWriter, r *http.Request, user database.User) { + type parameters struct { + Note string `json:"note"` + } + decoder := json.NewDecoder(r.Body) + params := parameters{} + err := decoder.Decode(¶ms) + if err != nil { + respondWithError(w, http.StatusInternalServerError, "Couldn't decode parameters", err) + return + } + + id := uuid.New().String() + err = cfg.DB.CreateNote(r.Context(), database.CreateNoteParams{ + ID: id, + CreatedAt: time.Now().UTC().Format(time.RFC3339), + UpdatedAt: time.Now().UTC().Format(time.RFC3339), + Note: params.Note, + UserID: user.ID, + }) + if err != nil { + respondWithError(w, http.StatusInternalServerError, "Couldn't create note", err) + return + } + + note, err := cfg.DB.GetNote(r.Context(), id) + if err != nil { + respondWithError(w, http.StatusNotFound, "Couldn't get note", err) + return + } + + noteResp, err := databaseNoteToNote(note) + if err != nil { + respondWithError(w, http.StatusInternalServerError, "Couldn't convert note", err) + return + } + + respondWithJSON(w, http.StatusCreated, noteResp) +} diff --git a/handler_ready.go b/handler_ready.go index 48142e41d4..e8384fc18c 100644 --- a/handler_ready.go +++ b/handler_ready.go @@ -1,7 +1,7 @@ -package main - -import "net/http" - -func handlerReadiness(w http.ResponseWriter, r *http.Request) { - respondWithJSON(w, http.StatusOK, map[string]string{"status": "ok"}) -} +package main + +import "net/http" + +func handlerReadiness(w http.ResponseWriter, r *http.Request) { + respondWithJSON(w, http.StatusOK, map[string]string{"status": "ok"}) +} diff --git a/handler_user.go b/handler_user.go index d53d4316fb..b695ff342c 100644 --- a/handler_user.go +++ b/handler_user.go @@ -1,79 +1,79 @@ -package main - -import ( - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "net/http" - "time" - - "github.com/bootdotdev/learn-cicd-starter/internal/database" - "github.com/google/uuid" -) - -func (cfg *apiConfig) handlerUsersCreate(w http.ResponseWriter, r *http.Request) { - type parameters struct { - Name string `json:"name"` - } - decoder := json.NewDecoder(r.Body) - params := parameters{} - err := decoder.Decode(¶ms) - if err != nil { - respondWithError(w, http.StatusInternalServerError, "Couldn't decode parameters", err) - return - } - - apiKey, err := generateRandomSHA256Hash() - if err != nil { - respondWithError(w, http.StatusInternalServerError, "Couldn't gen apikey", err) - return - } - - err = cfg.DB.CreateUser(r.Context(), database.CreateUserParams{ - ID: uuid.New().String(), - CreatedAt: 
time.Now().UTC().Format(time.RFC3339), - UpdatedAt: time.Now().UTC().Format(time.RFC3339), - Name: params.Name, - ApiKey: apiKey, - }) - if err != nil { - respondWithError(w, http.StatusInternalServerError, "Couldn't create user", err) - return - } - - user, err := cfg.DB.GetUser(r.Context(), apiKey) - if err != nil { - respondWithError(w, http.StatusInternalServerError, "Couldn't get user", err) - return - } - - userResp, err := databaseUserToUser(user) - if err != nil { - respondWithError(w, http.StatusInternalServerError, "Couldn't convert user", err) - return - } - respondWithJSON(w, http.StatusCreated, userResp) -} - -func generateRandomSHA256Hash() (string, error) { - randomBytes := make([]byte, 32) - _, err := rand.Read(randomBytes) - if err != nil { - return "", err - } - hash := sha256.Sum256(randomBytes) - hashString := hex.EncodeToString(hash[:]) - return hashString, nil -} - -func (cfg *apiConfig) handlerUsersGet(w http.ResponseWriter, r *http.Request, user database.User) { - - userResp, err := databaseUserToUser(user) - if err != nil { - respondWithError(w, http.StatusInternalServerError, "Couldn't convert user", err) - return - } - - respondWithJSON(w, http.StatusOK, userResp) -} +package main + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "net/http" + "time" + + "github.com/bootdotdev/learn-cicd-starter/internal/database" + "github.com/google/uuid" +) + +func (cfg *apiConfig) handlerUsersCreate(w http.ResponseWriter, r *http.Request) { + type parameters struct { + Name string `json:"name"` + } + decoder := json.NewDecoder(r.Body) + params := parameters{} + err := decoder.Decode(¶ms) + if err != nil { + respondWithError(w, http.StatusInternalServerError, "Couldn't decode parameters", err) + return + } + + apiKey, err := generateRandomSHA256Hash() + if err != nil { + respondWithError(w, http.StatusInternalServerError, "Couldn't gen apikey", err) + return + } + + err = cfg.DB.CreateUser(r.Context(), database.CreateUserParams{ + ID: uuid.New().String(), + CreatedAt: time.Now().UTC().Format(time.RFC3339), + UpdatedAt: time.Now().UTC().Format(time.RFC3339), + Name: params.Name, + ApiKey: apiKey, + }) + if err != nil { + respondWithError(w, http.StatusInternalServerError, "Couldn't create user", err) + return + } + + user, err := cfg.DB.GetUser(r.Context(), apiKey) + if err != nil { + respondWithError(w, http.StatusInternalServerError, "Couldn't get user", err) + return + } + + userResp, err := databaseUserToUser(user) + if err != nil { + respondWithError(w, http.StatusInternalServerError, "Couldn't convert user", err) + return + } + respondWithJSON(w, http.StatusCreated, userResp) +} + +func generateRandomSHA256Hash() (string, error) { + randomBytes := make([]byte, 32) + _, err := rand.Read(randomBytes) + if err != nil { + return "", err + } + hash := sha256.Sum256(randomBytes) + hashString := hex.EncodeToString(hash[:]) + return hashString, nil +} + +func (cfg *apiConfig) handlerUsersGet(w http.ResponseWriter, r *http.Request, user database.User) { + + userResp, err := databaseUserToUser(user) + if err != nil { + respondWithError(w, http.StatusInternalServerError, "Couldn't convert user", err) + return + } + + respondWithJSON(w, http.StatusOK, userResp) +} diff --git a/internal/auth/auth.go b/internal/auth/auth.go index f969aacf63..90dc225f3a 100644 --- a/internal/auth/auth.go +++ b/internal/auth/auth.go @@ -1,23 +1,23 @@ -package auth - -import ( - "errors" - "net/http" - "strings" -) - -var ErrNoAuthHeaderIncluded = errors.New("no 
authorization header included")
-
-// GetAPIKey -
-func GetAPIKey(headers http.Header) (string, error) {
-	authHeader := headers.Get("Authorization")
-	if authHeader == "" {
-		return "", ErrNoAuthHeaderIncluded
-	}
-	splitAuth := strings.Split(authHeader, " ")
-	if len(splitAuth) < 2 || splitAuth[0] != "ApiKey" {
-		return "", errors.New("malformed authorization header")
-	}
-
-	return splitAuth[1], nil
-}
+package auth
+
+import (
+	"errors"
+	"net/http"
+	"strings"
+)
+
+var ErrNoAuthHeaderIncluded = errors.New("no authorization header included")
+
+// GetAPIKey extracts the API key from an Authorization header of the form "ApiKey <key>".
+func GetAPIKey(headers http.Header) (string, error) {
+	authHeader := headers.Get("Authorization")
+	if authHeader == "" {
+		return "", ErrNoAuthHeaderIncluded
+	}
+	splitAuth := strings.Split(authHeader, " ")
+	if len(splitAuth) < 2 || splitAuth[0] != "ApiKey" {
+		return "", errors.New("malformed authorization header")
+	}
+
+	return splitAuth[1], nil
+}
diff --git a/internal/database/db.go b/internal/database/db.go
index 61f5bf46c8..ee4800d138 100644
--- a/internal/database/db.go
+++ b/internal/database/db.go
@@ -1,31 +1,31 @@
-// Code generated by sqlc. DO NOT EDIT.
-// versions:
-// sqlc v1.25.0
-
-package database
-
-import (
-	"context"
-	"database/sql"
-)
-
-type DBTX interface {
-	ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
-	PrepareContext(context.Context, string) (*sql.Stmt, error)
-	QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
-	QueryRowContext(context.Context, string, ...interface{}) *sql.Row
-}
-
-func New(db DBTX) *Queries {
-	return &Queries{db: db}
-}
-
-type Queries struct {
-	db DBTX
-}
-
-func (q *Queries) WithTx(tx *sql.Tx) *Queries {
-	return &Queries{
-		db: tx,
-	}
-}
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+// sqlc v1.25.0
+
+package database
+
+import (
+	"context"
+	"database/sql"
+)
+
+type DBTX interface {
+	ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
+	PrepareContext(context.Context, string) (*sql.Stmt, error)
+	QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
+	QueryRowContext(context.Context, string, ...interface{}) *sql.Row
+}
+
+func New(db DBTX) *Queries {
+	return &Queries{db: db}
+}
+
+type Queries struct {
+	db DBTX
+}
+
+func (q *Queries) WithTx(tx *sql.Tx) *Queries {
+	return &Queries{
+		db: tx,
+	}
+}
diff --git a/internal/database/models.go b/internal/database/models.go
index 70333ba1ab..4348e9d141 100644
--- a/internal/database/models.go
+++ b/internal/database/models.go
@@ -1,23 +1,23 @@
-// Code generated by sqlc. DO NOT EDIT.
-// versions:
-// sqlc v1.25.0
-
-package database
-
-import ()
-
-type Note struct {
-	ID        string
-	CreatedAt string
-	UpdatedAt string
-	Note      string
-	UserID    string
-}
-
-type User struct {
-	ID        string
-	CreatedAt string
-	UpdatedAt string
-	Name      string
-	ApiKey    string
-}
+// Code generated by sqlc. DO NOT EDIT.
+// versions:
+// sqlc v1.25.0
+
+package database
+
+import ()
+
+type Note struct {
+	ID        string
+	CreatedAt string
+	UpdatedAt string
+	Note      string
+	UserID    string
+}
+
+type User struct {
+	ID        string
+	CreatedAt string
+	UpdatedAt string
+	Name      string
+	ApiKey    string
+}
diff --git a/internal/database/notes.sql.go b/internal/database/notes.sql.go
index 234dd4c131..f9e8dfd8b7 100644
--- a/internal/database/notes.sql.go
+++ b/internal/database/notes.sql.go
@@ -1,86 +1,86 @@
-// Code generated by sqlc. DO NOT EDIT.
-// versions: -// sqlc v1.25.0 -// source: notes.sql - -package database - -import ( - "context" -) - -const createNote = `-- name: CreateNote :exec -INSERT INTO notes (id, created_at, updated_at, note, user_id) -VALUES (?, ?, ?, ?, ?) -` - -type CreateNoteParams struct { - ID string - CreatedAt string - UpdatedAt string - Note string - UserID string -} - -func (q *Queries) CreateNote(ctx context.Context, arg CreateNoteParams) error { - _, err := q.db.ExecContext(ctx, createNote, - arg.ID, - arg.CreatedAt, - arg.UpdatedAt, - arg.Note, - arg.UserID, - ) - return err -} - -const getNote = `-- name: GetNote :one - -SELECT id, created_at, updated_at, note, user_id FROM notes WHERE id = ? -` - -func (q *Queries) GetNote(ctx context.Context, id string) (Note, error) { - row := q.db.QueryRowContext(ctx, getNote, id) - var i Note - err := row.Scan( - &i.ID, - &i.CreatedAt, - &i.UpdatedAt, - &i.Note, - &i.UserID, - ) - return i, err -} - -const getNotesForUser = `-- name: GetNotesForUser :many - -SELECT id, created_at, updated_at, note, user_id FROM notes WHERE user_id = ? -` - -func (q *Queries) GetNotesForUser(ctx context.Context, userID string) ([]Note, error) { - rows, err := q.db.QueryContext(ctx, getNotesForUser, userID) - if err != nil { - return nil, err - } - defer rows.Close() - var items []Note - for rows.Next() { - var i Note - if err := rows.Scan( - &i.ID, - &i.CreatedAt, - &i.UpdatedAt, - &i.Note, - &i.UserID, - ); err != nil { - return nil, err - } - items = append(items, i) - } - if err := rows.Close(); err != nil { - return nil, err - } - if err := rows.Err(); err != nil { - return nil, err - } - return items, nil -} +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.25.0 +// source: notes.sql + +package database + +import ( + "context" +) + +const createNote = `-- name: CreateNote :exec +INSERT INTO notes (id, created_at, updated_at, note, user_id) +VALUES (?, ?, ?, ?, ?) +` + +type CreateNoteParams struct { + ID string + CreatedAt string + UpdatedAt string + Note string + UserID string +} + +func (q *Queries) CreateNote(ctx context.Context, arg CreateNoteParams) error { + _, err := q.db.ExecContext(ctx, createNote, + arg.ID, + arg.CreatedAt, + arg.UpdatedAt, + arg.Note, + arg.UserID, + ) + return err +} + +const getNote = `-- name: GetNote :one + +SELECT id, created_at, updated_at, note, user_id FROM notes WHERE id = ? +` + +func (q *Queries) GetNote(ctx context.Context, id string) (Note, error) { + row := q.db.QueryRowContext(ctx, getNote, id) + var i Note + err := row.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.Note, + &i.UserID, + ) + return i, err +} + +const getNotesForUser = `-- name: GetNotesForUser :many + +SELECT id, created_at, updated_at, note, user_id FROM notes WHERE user_id = ? 
+` + +func (q *Queries) GetNotesForUser(ctx context.Context, userID string) ([]Note, error) { + rows, err := q.db.QueryContext(ctx, getNotesForUser, userID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Note + for rows.Next() { + var i Note + if err := rows.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.Note, + &i.UserID, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/internal/database/users.sql.go b/internal/database/users.sql.go index 737b0d1d5f..bc813c7a69 100644 --- a/internal/database/users.sql.go +++ b/internal/database/users.sql.go @@ -1,58 +1,58 @@ -// Code generated by sqlc. DO NOT EDIT. -// versions: -// sqlc v1.25.0 -// source: users.sql - -package database - -import ( - "context" -) - -const createUser = `-- name: CreateUser :exec -INSERT INTO users (id, created_at, updated_at, name, api_key) -VALUES ( - ?, - ?, - ?, - ?, - ? -) -` - -type CreateUserParams struct { - ID string - CreatedAt string - UpdatedAt string - Name string - ApiKey string -} - -func (q *Queries) CreateUser(ctx context.Context, arg CreateUserParams) error { - _, err := q.db.ExecContext(ctx, createUser, - arg.ID, - arg.CreatedAt, - arg.UpdatedAt, - arg.Name, - arg.ApiKey, - ) - return err -} - -const getUser = `-- name: GetUser :one - -SELECT id, created_at, updated_at, name, api_key FROM users WHERE api_key = ? -` - -func (q *Queries) GetUser(ctx context.Context, apiKey string) (User, error) { - row := q.db.QueryRowContext(ctx, getUser, apiKey) - var i User - err := row.Scan( - &i.ID, - &i.CreatedAt, - &i.UpdatedAt, - &i.Name, - &i.ApiKey, - ) - return i, err -} +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.25.0 +// source: users.sql + +package database + +import ( + "context" +) + +const createUser = `-- name: CreateUser :exec +INSERT INTO users (id, created_at, updated_at, name, api_key) +VALUES ( + ?, + ?, + ?, + ?, + ? +) +` + +type CreateUserParams struct { + ID string + CreatedAt string + UpdatedAt string + Name string + ApiKey string +} + +func (q *Queries) CreateUser(ctx context.Context, arg CreateUserParams) error { + _, err := q.db.ExecContext(ctx, createUser, + arg.ID, + arg.CreatedAt, + arg.UpdatedAt, + arg.Name, + arg.ApiKey, + ) + return err +} + +const getUser = `-- name: GetUser :one + +SELECT id, created_at, updated_at, name, api_key FROM users WHERE api_key = ? 
+` + +func (q *Queries) GetUser(ctx context.Context, apiKey string) (User, error) { + row := q.db.QueryRowContext(ctx, getUser, apiKey) + var i User + err := row.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.Name, + &i.ApiKey, + ) + return i, err +} diff --git a/json.go b/json.go index 1e6e7985e1..e8a919c91f 100644 --- a/json.go +++ b/json.go @@ -1,34 +1,34 @@ -package main - -import ( - "encoding/json" - "log" - "net/http" -) - -func respondWithError(w http.ResponseWriter, code int, msg string, logErr error) { - if logErr != nil { - log.Println(logErr) - } - if code > 499 { - log.Printf("Responding with 5XX error: %s", msg) - } - type errorResponse struct { - Error string `json:"error"` - } - respondWithJSON(w, code, errorResponse{ - Error: msg, - }) -} - -func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) { - w.Header().Set("Content-Type", "application/json") - dat, err := json.Marshal(payload) - if err != nil { - log.Printf("Error marshalling JSON: %s", err) - w.WriteHeader(500) - return - } - w.WriteHeader(code) - w.Write(dat) -} +package main + +import ( + "encoding/json" + "log" + "net/http" +) + +func respondWithError(w http.ResponseWriter, code int, msg string, logErr error) { + if logErr != nil { + log.Println(logErr) + } + if code > 499 { + log.Printf("Responding with 5XX error: %s", msg) + } + type errorResponse struct { + Error string `json:"error"` + } + respondWithJSON(w, code, errorResponse{ + Error: msg, + }) +} + +func respondWithJSON(w http.ResponseWriter, code int, payload interface{}) { + w.Header().Set("Content-Type", "application/json") + dat, err := json.Marshal(payload) + if err != nil { + log.Printf("Error marshalling JSON: %s", err) + w.WriteHeader(500) + return + } + w.WriteHeader(code) + w.Write(dat) +} diff --git a/main.go b/main.go index 19d7366c5f..29bd02830e 100644 --- a/main.go +++ b/main.go @@ -1,98 +1,98 @@ -package main - -import ( - "database/sql" - "embed" - "io" - "log" - "net/http" - "os" - - "github.com/go-chi/chi" - "github.com/go-chi/cors" - "github.com/joho/godotenv" - - "github.com/bootdotdev/learn-cicd-starter/internal/database" - - _ "github.com/tursodatabase/libsql-client-go/libsql" -) - -type apiConfig struct { - DB *database.Queries -} - -//go:embed static/* -var staticFiles embed.FS - -func main() { - err := godotenv.Load(".env") - if err != nil { - log.Printf("warning: assuming default configuration. 
.env unreadable: %v", err) - } - - port := os.Getenv("PORT") - if port == "" { - log.Fatal("PORT environment variable is not set") - } - - apiCfg := apiConfig{} - - // https://github.com/libsql/libsql-client-go/#open-a-connection-to-sqld - // libsql://[your-database].turso.io?authToken=[your-auth-token] - dbURL := os.Getenv("DATABASE_URL") - if dbURL == "" { - log.Println("DATABASE_URL environment variable is not set") - log.Println("Running without CRUD endpoints") - } else { - db, err := sql.Open("libsql", dbURL) - if err != nil { - log.Fatal(err) - } - dbQueries := database.New(db) - apiCfg.DB = dbQueries - log.Println("Connected to database!") - } - - router := chi.NewRouter() - - router.Use(cors.Handler(cors.Options{ - AllowedOrigins: []string{"https://*", "http://*"}, - AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, - AllowedHeaders: []string{"*"}, - ExposedHeaders: []string{"Link"}, - AllowCredentials: false, - MaxAge: 300, - })) - - router.Get("/", func(w http.ResponseWriter, r *http.Request) { - f, err := staticFiles.Open("static/index.html") - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - defer f.Close() - if _, err := io.Copy(w, f); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } - }) - - v1Router := chi.NewRouter() - - if apiCfg.DB != nil { - v1Router.Post("/users", apiCfg.handlerUsersCreate) - v1Router.Get("/users", apiCfg.middlewareAuth(apiCfg.handlerUsersGet)) - v1Router.Get("/notes", apiCfg.middlewareAuth(apiCfg.handlerNotesGet)) - v1Router.Post("/notes", apiCfg.middlewareAuth(apiCfg.handlerNotesCreate)) - } - - v1Router.Get("/healthz", handlerReadiness) - - router.Mount("/v1", v1Router) - srv := &http.Server{ - Addr: ":" + port, - Handler: router, - } - - log.Printf("Serving on port: %s\n", port) - log.Fatal(srv.ListenAndServe()) -} +package main + +import ( + "database/sql" + "embed" + "io" + "log" + "net/http" + "os" + + "github.com/go-chi/chi" + "github.com/go-chi/cors" + "github.com/joho/godotenv" + + "github.com/bootdotdev/learn-cicd-starter/internal/database" + + _ "github.com/tursodatabase/libsql-client-go/libsql" +) + +type apiConfig struct { + DB *database.Queries +} + +//go:embed static/* +var staticFiles embed.FS + +func main() { + err := godotenv.Load(".env") + if err != nil { + log.Printf("warning: assuming default configuration. 
.env unreadable: %v", err) + } + + port := os.Getenv("PORT") + if port == "" { + log.Fatal("PORT environment variable is not set") + } + + apiCfg := apiConfig{} + + // https://github.com/libsql/libsql-client-go/#open-a-connection-to-sqld + // libsql://[your-database].turso.io?authToken=[your-auth-token] + dbURL := os.Getenv("DATABASE_URL") + if dbURL == "" { + log.Println("DATABASE_URL environment variable is not set") + log.Println("Running without CRUD endpoints") + } else { + db, err := sql.Open("libsql", dbURL) + if err != nil { + log.Fatal(err) + } + dbQueries := database.New(db) + apiCfg.DB = dbQueries + log.Println("Connected to database!") + } + + router := chi.NewRouter() + + router.Use(cors.Handler(cors.Options{ + AllowedOrigins: []string{"https://*", "http://*"}, + AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, + AllowedHeaders: []string{"*"}, + ExposedHeaders: []string{"Link"}, + AllowCredentials: false, + MaxAge: 300, + })) + + router.Get("/", func(w http.ResponseWriter, r *http.Request) { + f, err := staticFiles.Open("static/index.html") + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + defer f.Close() + if _, err := io.Copy(w, f); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + }) + + v1Router := chi.NewRouter() + + if apiCfg.DB != nil { + v1Router.Post("/users", apiCfg.handlerUsersCreate) + v1Router.Get("/users", apiCfg.middlewareAuth(apiCfg.handlerUsersGet)) + v1Router.Get("/notes", apiCfg.middlewareAuth(apiCfg.handlerNotesGet)) + v1Router.Post("/notes", apiCfg.middlewareAuth(apiCfg.handlerNotesCreate)) + } + + v1Router.Get("/healthz", handlerReadiness) + + router.Mount("/v1", v1Router) + srv := &http.Server{ + Addr: ":" + port, + Handler: router, + } + + log.Printf("Serving on port: %s\n", port) + log.Fatal(srv.ListenAndServe()) +} diff --git a/middleware_auth.go b/middleware_auth.go index 6cbe03f867..1f9f1096e2 100644 --- a/middleware_auth.go +++ b/middleware_auth.go @@ -1,28 +1,28 @@ -package main - -import ( - "net/http" - - "github.com/bootdotdev/learn-cicd-starter/internal/auth" - "github.com/bootdotdev/learn-cicd-starter/internal/database" -) - -type authedHandler func(http.ResponseWriter, *http.Request, database.User) - -func (cfg *apiConfig) middlewareAuth(handler authedHandler) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - apiKey, err := auth.GetAPIKey(r.Header) - if err != nil { - respondWithError(w, http.StatusUnauthorized, "Couldn't find api key", err) - return - } - - user, err := cfg.DB.GetUser(r.Context(), apiKey) - if err != nil { - respondWithError(w, http.StatusNotFound, "Couldn't get user", err) - return - } - - handler(w, r, user) - } -} +package main + +import ( + "net/http" + + "github.com/bootdotdev/learn-cicd-starter/internal/auth" + "github.com/bootdotdev/learn-cicd-starter/internal/database" +) + +type authedHandler func(http.ResponseWriter, *http.Request, database.User) + +func (cfg *apiConfig) middlewareAuth(handler authedHandler) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + apiKey, err := auth.GetAPIKey(r.Header) + if err != nil { + respondWithError(w, http.StatusUnauthorized, "Couldn't find api key", err) + return + } + + user, err := cfg.DB.GetUser(r.Context(), apiKey) + if err != nil { + respondWithError(w, http.StatusNotFound, "Couldn't get user", err) + return + } + + handler(w, r, user) + } +} diff --git a/models.go b/models.go index 6d79f5f940..52b3946976 100644 --- a/models.go 
+++ b/models.go @@ -1,74 +1,74 @@ -package main - -import ( - "time" - - "github.com/bootdotdev/learn-cicd-starter/internal/database" -) - -type User struct { - ID string `json:"id"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - Name string `json:"name"` - ApiKey string `json:"api_key"` -} - -func databaseUserToUser(user database.User) (User, error) { - createdAt, err := time.Parse(time.RFC3339, user.CreatedAt) - if err != nil { - return User{}, err - } - - updatedAt, err := time.Parse(time.RFC3339, user.UpdatedAt) - if err != nil { - return User{}, err - } - return User{ - ID: user.ID, - CreatedAt: createdAt, - UpdatedAt: updatedAt, - Name: user.Name, - ApiKey: user.ApiKey, - }, nil -} - -type Note struct { - ID string `json:"id"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - Note string `json:"note"` - UserID string `json:"user_id"` -} - -func databaseNoteToNote(post database.Note) (Note, error) { - createdAt, err := time.Parse(time.RFC3339, post.CreatedAt) - if err != nil { - return Note{}, err - } - - updatedAt, err := time.Parse(time.RFC3339, post.UpdatedAt) - if err != nil { - return Note{}, err - } - return Note{ - ID: post.ID, - CreatedAt: createdAt, - UpdatedAt: updatedAt, - Note: post.Note, - UserID: post.UserID, - }, nil -} - -func databasePostsToPosts(notes []database.Note) ([]Note, error) { - result := make([]Note, len(notes)) - for i, note := range notes { - var err error - result[i], err = databaseNoteToNote(note) - if err != nil { - return nil, err - } - - } - return result, nil -} +package main + +import ( + "time" + + "github.com/bootdotdev/learn-cicd-starter/internal/database" +) + +type User struct { + ID string `json:"id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Name string `json:"name"` + ApiKey string `json:"api_key"` +} + +func databaseUserToUser(user database.User) (User, error) { + createdAt, err := time.Parse(time.RFC3339, user.CreatedAt) + if err != nil { + return User{}, err + } + + updatedAt, err := time.Parse(time.RFC3339, user.UpdatedAt) + if err != nil { + return User{}, err + } + return User{ + ID: user.ID, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + Name: user.Name, + ApiKey: user.ApiKey, + }, nil +} + +type Note struct { + ID string `json:"id"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Note string `json:"note"` + UserID string `json:"user_id"` +} + +func databaseNoteToNote(post database.Note) (Note, error) { + createdAt, err := time.Parse(time.RFC3339, post.CreatedAt) + if err != nil { + return Note{}, err + } + + updatedAt, err := time.Parse(time.RFC3339, post.UpdatedAt) + if err != nil { + return Note{}, err + } + return Note{ + ID: post.ID, + CreatedAt: createdAt, + UpdatedAt: updatedAt, + Note: post.Note, + UserID: post.UserID, + }, nil +} + +func databasePostsToPosts(notes []database.Note) ([]Note, error) { + result := make([]Note, len(notes)) + for i, note := range notes { + var err error + result[i], err = databaseNoteToNote(note) + if err != nil { + return nil, err + } + + } + return result, nil +} diff --git a/scripts/buildprod.sh b/scripts/buildprod.sh index f08d1a592e..21170fe68a 100755 --- a/scripts/buildprod.sh +++ b/scripts/buildprod.sh @@ -1,3 +1,3 @@ -#!/bin/bash - -CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o notely +#!/bin/bash + +CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o notely diff --git a/scripts/migrateup.sh b/scripts/migrateup.sh index 
fe69c6fc7a..76c515225c 100755
--- a/scripts/migrateup.sh
+++ b/scripts/migrateup.sh
@@ -1,8 +1,8 @@
-#!/bin/bash
-
-if [ -f .env ]; then
-  source .env
-fi
-
-cd sql/schema
-goose turso $DATABASE_URL up
+#!/bin/bash
+
+if [ -f .env ]; then
+  source .env
+fi
+
+cd sql/schema
+goose turso $DATABASE_URL up
diff --git a/sql/queries/notes.sql b/sql/queries/notes.sql
index 72b50bcda7..79db074df2 100644
--- a/sql/queries/notes.sql
+++ b/sql/queries/notes.sql
@@ -1,12 +1,12 @@
--- name: CreateNote :exec
-INSERT INTO notes (id, created_at, updated_at, note, user_id)
-VALUES (?, ?, ?, ?, ?);
---
-
--- name: GetNote :one
-SELECT * FROM notes WHERE id = ?;
---
-
--- name: GetNotesForUser :many
-SELECT * FROM notes WHERE user_id = ?;
---
+-- name: CreateNote :exec
+INSERT INTO notes (id, created_at, updated_at, note, user_id)
+VALUES (?, ?, ?, ?, ?);
+--
+
+-- name: GetNote :one
+SELECT * FROM notes WHERE id = ?;
+--
+
+-- name: GetNotesForUser :many
+SELECT * FROM notes WHERE user_id = ?;
+--
diff --git a/sql/queries/users.sql b/sql/queries/users.sql
index 3e979b6642..fef608c3a1 100644
--- a/sql/queries/users.sql
+++ b/sql/queries/users.sql
@@ -1,14 +1,14 @@
--- name: CreateUser :exec
-INSERT INTO users (id, created_at, updated_at, name, api_key)
-VALUES (
-    ?,
-    ?,
-    ?,
-    ?,
-    ?
-);
---
-
--- name: GetUser :one
-SELECT * FROM users WHERE api_key = ?;
---
+-- name: CreateUser :exec
+INSERT INTO users (id, created_at, updated_at, name, api_key)
+VALUES (
+    ?,
+    ?,
+    ?,
+    ?,
+    ?
+);
+--
+
+-- name: GetUser :one
+SELECT * FROM users WHERE api_key = ?;
+--
diff --git a/sql/schema/001_users.sql b/sql/schema/001_users.sql
index 71e5ffbd35..3fd7d552d6 100644
--- a/sql/schema/001_users.sql
+++ b/sql/schema/001_users.sql
@@ -1,11 +1,11 @@
--- +goose Up
-CREATE TABLE users (
-    id TEXT PRIMARY KEY,
-    created_at TEXT NOT NULL,
-    updated_at TEXT NOT NULL,
-    name TEXT NOT NULL,
-    api_key TEXT UNIQUE NOT NULL
-);
-
--- +goose Down
-DROP TABLE users;
+-- +goose Up
+CREATE TABLE users (
+    id TEXT PRIMARY KEY,
+    created_at TEXT NOT NULL,
+    updated_at TEXT NOT NULL,
+    name TEXT NOT NULL,
+    api_key TEXT UNIQUE NOT NULL
+);
+
+-- +goose Down
+DROP TABLE users;
diff --git a/sql/schema/002_notes.sql b/sql/schema/002_notes.sql
index b2c4440990..a5543c8db1 100644
--- a/sql/schema/002_notes.sql
+++ b/sql/schema/002_notes.sql
@@ -1,11 +1,11 @@
--- +goose Up
-CREATE TABLE notes (
-    id TEXT PRIMARY KEY,
-    created_at TEXT NOT NULL,
-    updated_at TEXT NOT NULL,
-    note TEXT NOT NULL,
-    user_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE
-);
-
--- +goose Down
-DROP TABLE notes;
+-- +goose Up
+CREATE TABLE notes (
+    id TEXT PRIMARY KEY,
+    created_at TEXT NOT NULL,
+    updated_at TEXT NOT NULL,
+    note TEXT NOT NULL,
+    user_id TEXT NOT NULL REFERENCES users(id) ON DELETE CASCADE
+);
+
+-- +goose Down
+DROP TABLE notes;
diff --git a/sqlc.yaml b/sqlc.yaml
index 6300c07fec..490b33b227 100644
--- a/sqlc.yaml
+++ b/sqlc.yaml
@@ -1,8 +1,8 @@
-version: "2"
-sql:
-  - schema: "sql/schema"
-    queries: "sql/queries"
-    engine: "sqlite"
-    gen:
-      go:
-        out: "internal/database"
+version: "2"
+sql:
+  - schema: "sql/schema"
+    queries: "sql/queries"
+    engine: "sqlite"
+    gen:
+      go:
+        out: "internal/database"
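A note on tooling: the sqlc.yaml above is what produced the generated `internal/database` package seen earlier in this diff. Assuming sqlc is installed locally, the package is regenerated from these schema and query files with a single command:

```bash
sqlc generate
```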

diff --git a/static/index.html b/static/index.html
index 72be101028..2a4a4aa724 100644
--- a/static/index.html
+++ b/static/index.html
@@ -1,193 +1,193 @@
[hunk content lost in extraction: the markup of the 193-line "Notely" HTML page was stripped, leaving only stray text; as with the rest of this diff, the old and new sides have identical line counts]
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
index 52cf18e425..2a8b60561e 100644
--- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/LICENSE
@@ -1,26 +1,26 @@
-Copyright 2021 The ANTLR Project
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- 3. Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from this
- software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+Copyright 2021 The ANTLR Project
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go index ab51212676..82eda477cb 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/antlrdoc.go @@ -1,68 +1,68 @@ -/* -Package antlr implements the Go version of the ANTLR 4 runtime. - -# The ANTLR Tool - -ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, -or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. -From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface -(or visitor) that makes it easy to respond to the recognition of phrases of interest. - -# Code Generation - -ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a -runtime library, written specifically to support the generated code in the target language. This library is the -runtime for the Go target. - -To generate code for the go target, it is generally recommended to place the source grammar files in a package of -their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory -it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean -that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other -way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in -your IDE, or configuration in your CI system. - -Here is a general template for an ANTLR based recognizer in Go: - - . - ├── myproject - ├── parser - │ ├── mygrammar.g4 - │ ├── antlr-4.12.0-complete.jar - │ ├── error_listeners.go - │ ├── generate.go - │ ├── generate.sh - ├── go.mod - ├── go.sum - ├── main.go - └── main_test.go - -Make sure that the package statement in your grammar file(s) reflects the go package they exist in. -The generate.go file then looks like this: - - package parser - - //go:generate ./generate.sh - -And the generate.sh file will look similar to this: - - #!/bin/sh - - alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool' - antlr4 -Dlanguage=Go -no-visitor -package parser *.g4 - -depending on whether you want visitors or listeners or any other ANTLR options. - -From the command line at the root of your package “myproject” you can then simply issue the command: - - go generate ./... - -# Copyright Notice - -Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. - -Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. - -[target languages]: https://github.com/antlr/antlr4/tree/master/runtime -[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt -*/ -package antlr +/* +Package antlr implements the Go version of the ANTLR 4 runtime. + +# The ANTLR Tool + +ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, +or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. 
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface +(or visitor) that makes it easy to respond to the recognition of phrases of interest. + +# Code Generation + +ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a +runtime library, written specifically to support the generated code in the target language. This library is the +runtime for the Go target. + +To generate code for the go target, it is generally recommended to place the source grammar files in a package of +their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory +it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean +that the antlr tool JAR file will be checked in to your source code control though, so you are free to use any other +way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in +your IDE, or configuration in your CI system. + +Here is a general template for an ANTLR based recognizer in Go: + + . + ├── myproject + ├── parser + │ ├── mygrammar.g4 + │ ├── antlr-4.12.0-complete.jar + │ ├── error_listeners.go + │ ├── generate.go + │ ├── generate.sh + ├── go.mod + ├── go.sum + ├── main.go + └── main_test.go + +Make sure that the package statement in your grammar file(s) reflects the go package they exist in. +The generate.go file then looks like this: + + package parser + + //go:generate ./generate.sh + +And the generate.sh file will look similar to this: + + #!/bin/sh + + alias antlr4='java -Xmx500M -cp "./antlr4-4.12.0-complete.jar:$CLASSPATH" org.antlr.v4.Tool' + antlr4 -Dlanguage=Go -no-visitor -package parser *.g4 + +depending on whether you want visitors or listeners or any other ANTLR options. + +From the command line at the root of your package “myproject” you can then simply issue the command: + + go generate ./... + +# Copyright Notice + +Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. + +Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root. + +[target languages]: https://github.com/antlr/antlr4/tree/master/runtime +[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt +*/ +package antlr diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go index 98010d2e6e..7ab90fe2c7 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn.go @@ -1,176 +1,176 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "sync" - -// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or -// which is invalid for a particular struct such as [*antlr.BaseRuleContext] -var ATNInvalidAltNumber int - -// ATN represents an “[Augmented Transition Network]”, though general in ANTLR the term -// “Augmented Recursive Transition Network” though there are some descriptions of “[Recursive Transition Network]” -// in existence. -// -// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)]. 
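The generate.go/generate.sh layout documented in antlrdoc.go stops at main.go. As a rough companion sketch (the grammar name MyGrammar, the start rule document, and the module path myproject are assumptions for illustration, not part of the runtime), driving a generated recognizer usually looks like:

    package main

    import (
        "github.com/antlr/antlr4/runtime/Go/antlr/v4"

        "myproject/parser" // hypothetical import path for the generated package
    )

    func main() {
        // Wrap the input, lex it, buffer the tokens, and parse from the start rule.
        input := antlr.NewInputStream("some input")
        lexer := parser.NewMyGrammarLexer(input) // generated constructor (assumed name)
        tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
        p := parser.NewMyGrammarParser(tokens) // generated constructor (assumed name)
        tree := p.Document() // generated method for the assumed start rule `document`
        _ = tree
    }

After `go generate ./...` regenerates the parser package, a program like this compiles against the runtime vendored here.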
-// -// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network -// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf -// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network -type ATN struct { - // DecisionToState is the decision points for all rules, subrules, optional - // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we - // can go back later and build DFA predictors for them. This includes - // all the rules, subrules, optional blocks, ()+, ()* etc... - DecisionToState []DecisionState - - // grammarType is the ATN type and is used for deserializing ATNs from strings. - grammarType int - - // lexerActions is referenced by action transitions in the ATN for lexer ATNs. - lexerActions []LexerAction - - // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN. - maxTokenType int - - modeNameToStartState map[string]*TokensStartState - - modeToStartState []*TokensStartState - - // ruleToStartState maps from rule index to starting state number. - ruleToStartState []*RuleStartState - - // ruleToStopState maps from rule index to stop state number. - ruleToStopState []*RuleStopState - - // ruleToTokenType maps the rule index to the resulting token type for lexer - // ATNs. For parser ATNs, it maps the rule index to the generated bypass token - // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was - // specified, and otherwise is nil. - ruleToTokenType []int - - states []ATNState - - mu sync.Mutex - stateMu sync.RWMutex - edgeMu sync.RWMutex -} - -// NewATN returns a new ATN struct representing the given grammarType and is used -// for runtime deserialization of ATNs from the code generated by the ANTLR tool -func NewATN(grammarType int, maxTokenType int) *ATN { - return &ATN{ - grammarType: grammarType, - maxTokenType: maxTokenType, - modeNameToStartState: make(map[string]*TokensStartState), - } -} - -// NextTokensInContext computes and returns the set of valid tokens that can occur starting -// in state s. If ctx is nil, the set of tokens will not include what can follow -// the rule surrounding s. In other words, the set will be restricted to tokens -// reachable staying within the rule of s. -func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet { - return NewLL1Analyzer(a).Look(s, nil, ctx) -} - -// NextTokensNoContext computes and returns the set of valid tokens that can occur starting -// in state s and staying in same rule. [antlr.Token.EPSILON] is in set if we reach end of -// rule. -func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { - a.mu.Lock() - defer a.mu.Unlock() - iset := s.GetNextTokenWithinRule() - if iset == nil { - iset = a.NextTokensInContext(s, nil) - iset.readOnly = true - s.SetNextTokenWithinRule(iset) - } - return iset -} - -// NextTokens computes and returns the set of valid tokens starting in state s, by -// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil). 
-func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { - if ctx == nil { - return a.NextTokensNoContext(s) - } - - return a.NextTokensInContext(s, ctx) -} - -func (a *ATN) addState(state ATNState) { - if state != nil { - state.SetATN(a) - state.SetStateNumber(len(a.states)) - } - - a.states = append(a.states, state) -} - -func (a *ATN) removeState(state ATNState) { - a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice -} - -func (a *ATN) defineDecisionState(s DecisionState) int { - a.DecisionToState = append(a.DecisionToState, s) - s.setDecision(len(a.DecisionToState) - 1) - - return s.getDecision() -} - -func (a *ATN) getDecisionState(decision int) DecisionState { - if len(a.DecisionToState) == 0 { - return nil - } - - return a.DecisionToState[decision] -} - -// getExpectedTokens computes the set of input symbols which could follow ATN -// state number stateNumber in the specified full parse context ctx and returns -// the set of potentially valid input symbols which could follow the specified -// state in the specified context. This method considers the complete parser -// context, but does not evaluate semantic predicates (i.e. all predicates -// encountered during the calculation are assumed true). If a path in the ATN -// exists from the starting state to the RuleStopState of the outermost context -// without Matching any symbols, Token.EOF is added to the returned set. -// -// A nil ctx defaults to ParserRuleContext.EMPTY. -// -// It panics if the ATN does not contain state stateNumber. -func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet { - if stateNumber < 0 || stateNumber >= len(a.states) { - panic("Invalid state number.") - } - - s := a.states[stateNumber] - following := a.NextTokens(s, nil) - - if !following.contains(TokenEpsilon) { - return following - } - - expected := NewIntervalSet() - - expected.addSet(following) - expected.removeOne(TokenEpsilon) - - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := a.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - - following = a.NextTokens(rt.(*RuleTransition).followState, nil) - expected.addSet(following) - expected.removeOne(TokenEpsilon) - ctx = ctx.GetParent().(RuleContext) - } - - if following.contains(TokenEpsilon) { - expected.addOne(TokenEOF) - } - - return expected -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "sync" + +// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or +// which is invalid for a particular struct such as [*antlr.BaseRuleContext] +var ATNInvalidAltNumber int + +// ATN represents an “[Augmented Transition Network]”, though general in ANTLR the term +// “Augmented Recursive Transition Network” though there are some descriptions of “[Recursive Transition Network]” +// in existence. +// +// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)]. 
+// +// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network +// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf +// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network +type ATN struct { + // DecisionToState is the decision points for all rules, subrules, optional + // blocks, ()+, ()*, etc. Each subrule/rule is a decision point, and we must track them so we + // can go back later and build DFA predictors for them. This includes + // all the rules, subrules, optional blocks, ()+, ()* etc... + DecisionToState []DecisionState + + // grammarType is the ATN type and is used for deserializing ATNs from strings. + grammarType int + + // lexerActions is referenced by action transitions in the ATN for lexer ATNs. + lexerActions []LexerAction + + // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN. + maxTokenType int + + modeNameToStartState map[string]*TokensStartState + + modeToStartState []*TokensStartState + + // ruleToStartState maps from rule index to starting state number. + ruleToStartState []*RuleStartState + + // ruleToStopState maps from rule index to stop state number. + ruleToStopState []*RuleStopState + + // ruleToTokenType maps the rule index to the resulting token type for lexer + // ATNs. For parser ATNs, it maps the rule index to the generated bypass token + // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was + // specified, and otherwise is nil. + ruleToTokenType []int + + states []ATNState + + mu sync.Mutex + stateMu sync.RWMutex + edgeMu sync.RWMutex +} + +// NewATN returns a new ATN struct representing the given grammarType and is used +// for runtime deserialization of ATNs from the code generated by the ANTLR tool +func NewATN(grammarType int, maxTokenType int) *ATN { + return &ATN{ + grammarType: grammarType, + maxTokenType: maxTokenType, + modeNameToStartState: make(map[string]*TokensStartState), + } +} + +// NextTokensInContext computes and returns the set of valid tokens that can occur starting +// in state s. If ctx is nil, the set of tokens will not include what can follow +// the rule surrounding s. In other words, the set will be restricted to tokens +// reachable staying within the rule of s. +func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet { + return NewLL1Analyzer(a).Look(s, nil, ctx) +} + +// NextTokensNoContext computes and returns the set of valid tokens that can occur starting +// in state s and staying in same rule. [antlr.Token.EPSILON] is in set if we reach end of +// rule. +func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { + a.mu.Lock() + defer a.mu.Unlock() + iset := s.GetNextTokenWithinRule() + if iset == nil { + iset = a.NextTokensInContext(s, nil) + iset.readOnly = true + s.SetNextTokenWithinRule(iset) + } + return iset +} + +// NextTokens computes and returns the set of valid tokens starting in state s, by +// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil). 
+func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { + if ctx == nil { + return a.NextTokensNoContext(s) + } + + return a.NextTokensInContext(s, ctx) +} + +func (a *ATN) addState(state ATNState) { + if state != nil { + state.SetATN(a) + state.SetStateNumber(len(a.states)) + } + + a.states = append(a.states, state) +} + +func (a *ATN) removeState(state ATNState) { + a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice +} + +func (a *ATN) defineDecisionState(s DecisionState) int { + a.DecisionToState = append(a.DecisionToState, s) + s.setDecision(len(a.DecisionToState) - 1) + + return s.getDecision() +} + +func (a *ATN) getDecisionState(decision int) DecisionState { + if len(a.DecisionToState) == 0 { + return nil + } + + return a.DecisionToState[decision] +} + +// getExpectedTokens computes the set of input symbols which could follow ATN +// state number stateNumber in the specified full parse context ctx and returns +// the set of potentially valid input symbols which could follow the specified +// state in the specified context. This method considers the complete parser +// context, but does not evaluate semantic predicates (i.e. all predicates +// encountered during the calculation are assumed true). If a path in the ATN +// exists from the starting state to the RuleStopState of the outermost context +// without Matching any symbols, Token.EOF is added to the returned set. +// +// A nil ctx defaults to ParserRuleContext.EMPTY. +// +// It panics if the ATN does not contain state stateNumber. +func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet { + if stateNumber < 0 || stateNumber >= len(a.states) { + panic("Invalid state number.") + } + + s := a.states[stateNumber] + following := a.NextTokens(s, nil) + + if !following.contains(TokenEpsilon) { + return following + } + + expected := NewIntervalSet() + + expected.addSet(following) + expected.removeOne(TokenEpsilon) + + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := a.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + + following = a.NextTokens(rt.(*RuleTransition).followState, nil) + expected.addSet(following) + expected.removeOne(TokenEpsilon) + ctx = ctx.GetParent().(RuleContext) + } + + if following.contains(TokenEpsilon) { + expected.addOne(TokenEOF) + } + + return expected +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go index 7619fa172e..5aa55fc8f5 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config.go @@ -1,303 +1,303 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic -// context). The syntactic context is a graph-structured stack node whose -// path(s) to the root is the rule invocation(s) chain used to arrive at the -// state. The semantic context is the tree of semantic predicates encountered -// before reaching an ATN state. 
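The NextTokens entry points in atn.go differ in scope and cost: the context-free set is computed once and memoized on the state, while the context-sensitive set is recomputed per call and also covers what can follow the invoking rule(s). A minimal sketch of the distinction, assuming an already-deserialized ATN and one of its states:

    package atndemo

    import "github.com/antlr/antlr4/runtime/Go/antlr/v4"

    // follow reports both flavors of the valid-token set for state s.
    func follow(atn *antlr.ATN, s antlr.ATNState, ctx antlr.RuleContext) (inRule, withCtx *antlr.IntervalSet) {
        // Restricted to s's own rule; cached on the state after the first call.
        // Equivalent to atn.NextTokens(s, nil).
        inRule = atn.NextTokensNoContext(s)

        // Also includes tokens that can follow the rule surrounding s.
        // Equivalent to atn.NextTokens(s, ctx) for a non-nil ctx.
        withCtx = atn.NextTokensInContext(s, ctx)

        return inRule, withCtx
    }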
-type ATNConfig interface { - Equals(o Collectable[ATNConfig]) bool - Hash() int - - GetState() ATNState - GetAlt() int - GetSemanticContext() SemanticContext - - GetContext() PredictionContext - SetContext(PredictionContext) - - GetReachesIntoOuterContext() int - SetReachesIntoOuterContext(int) - - String() string - - getPrecedenceFilterSuppressed() bool - setPrecedenceFilterSuppressed(bool) -} - -type BaseATNConfig struct { - precedenceFilterSuppressed bool - state ATNState - alt int - context PredictionContext - semanticContext SemanticContext - reachesIntoOuterContext int -} - -func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup - return &BaseATNConfig{ - state: old.state, - alt: old.alt, - context: old.context, - semanticContext: old.semanticContext, - reachesIntoOuterContext: old.reachesIntoOuterContext, - } -} - -func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig5(state, alt, context, SemanticContextNone) -} - -func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") // TODO: Necessary? - } - - return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} -} - -func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) -} - -func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) -} - -func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { - return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) -} - -func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { - return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) -} - -func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { - if semanticContext == nil { - panic("semanticContext cannot be nil") - } - - return &BaseATNConfig{ - state: state, - alt: c.GetAlt(), - context: context, - semanticContext: semanticContext, - reachesIntoOuterContext: c.GetReachesIntoOuterContext(), - precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), - } -} - -func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { - return b.precedenceFilterSuppressed -} - -func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { - b.precedenceFilterSuppressed = v -} - -func (b *BaseATNConfig) GetState() ATNState { - return b.state -} - -func (b *BaseATNConfig) GetAlt() int { - return b.alt -} - -func (b *BaseATNConfig) SetContext(v PredictionContext) { - b.context = v -} -func (b *BaseATNConfig) GetContext() PredictionContext { - return b.context -} - -func (b *BaseATNConfig) GetSemanticContext() SemanticContext { - return b.semanticContext -} - -func (b *BaseATNConfig) GetReachesIntoOuterContext() int { - return b.reachesIntoOuterContext -} - -func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { - b.reachesIntoOuterContext = v -} - -// Equals is the default comparison function for an ATNConfig when no specialist implementation is required -// for a collection. -// -// An ATN configuration is equal to another if both have the same state, they -// predict the same alternative, and syntactic/semantic contexts are the same. 
-func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool { - if b == o { - return true - } else if o == nil { - return false - } - - var other, ok = o.(*BaseATNConfig) - - if !ok { - return false - } - - var equal bool - - if b.context == nil { - equal = other.context == nil - } else { - equal = b.context.Equals(other.context) - } - - var ( - nums = b.state.GetStateNumber() == other.state.GetStateNumber() - alts = b.alt == other.alt - cons = b.semanticContext.Equals(other.semanticContext) - sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed - ) - - return nums && alts && cons && sups && equal -} - -// Hash is the default hash function for BaseATNConfig, when no specialist hash function -// is required for a collection -func (b *BaseATNConfig) Hash() int { - var c int - if b.context != nil { - c = b.context.Hash() - } - - h := murmurInit(7) - h = murmurUpdate(h, b.state.GetStateNumber()) - h = murmurUpdate(h, b.alt) - h = murmurUpdate(h, c) - h = murmurUpdate(h, b.semanticContext.Hash()) - return murmurFinish(h, 4) -} - -func (b *BaseATNConfig) String() string { - var s1, s2, s3 string - - if b.context != nil { - s1 = ",[" + fmt.Sprint(b.context) + "]" - } - - if b.semanticContext != SemanticContextNone { - s2 = "," + fmt.Sprint(b.semanticContext) - } - - if b.reachesIntoOuterContext > 0 { - s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) - } - - return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) -} - -type LexerATNConfig struct { - *BaseATNConfig - lexerActionExecutor *LexerActionExecutor - passedThroughNonGreedyDecision bool -} - -func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), - lexerActionExecutor: lexerActionExecutor, - } -} - -func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), - lexerActionExecutor: lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{ - BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), - lexerActionExecutor: c.lexerActionExecutor, - passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), - } -} - -func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { - return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} -} - -// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via -// the default comparator [ObjEqComparator]. 
-func (l *LexerATNConfig) Hash() int { - var f int - if l.passedThroughNonGreedyDecision { - f = 1 - } else { - f = 0 - } - h := murmurInit(7) - h = murmurUpdate(h, l.state.GetStateNumber()) - h = murmurUpdate(h, l.alt) - h = murmurUpdate(h, l.context.Hash()) - h = murmurUpdate(h, l.semanticContext.Hash()) - h = murmurUpdate(h, f) - h = murmurUpdate(h, l.lexerActionExecutor.Hash()) - h = murmurFinish(h, 6) - return h -} - -// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via -// the default comparator [ObjEqComparator]. -func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool { - if l == other { - return true - } - var othert, ok = other.(*LexerATNConfig) - - if l == other { - return true - } else if !ok { - return false - } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { - return false - } - - var b bool - - if l.lexerActionExecutor != nil { - b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor) - } else { - b = othert.lexerActionExecutor != nil - } - - if b { - return false - } - - return l.BaseATNConfig.Equals(othert.BaseATNConfig) -} - -func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { - var ds, ok = target.(DecisionState) - - return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic +// context). The syntactic context is a graph-structured stack node whose +// path(s) to the root is the rule invocation(s) chain used to arrive at the +// state. The semantic context is the tree of semantic predicates encountered +// before reaching an ATN state. +type ATNConfig interface { + Equals(o Collectable[ATNConfig]) bool + Hash() int + + GetState() ATNState + GetAlt() int + GetSemanticContext() SemanticContext + + GetContext() PredictionContext + SetContext(PredictionContext) + + GetReachesIntoOuterContext() int + SetReachesIntoOuterContext(int) + + String() string + + getPrecedenceFilterSuppressed() bool + setPrecedenceFilterSuppressed(bool) +} + +type BaseATNConfig struct { + precedenceFilterSuppressed bool + state ATNState + alt int + context PredictionContext + semanticContext SemanticContext + reachesIntoOuterContext int +} + +func NewBaseATNConfig7(old *BaseATNConfig) ATNConfig { // TODO: Dup + return &BaseATNConfig{ + state: old.state, + alt: old.alt, + context: old.context, + semanticContext: old.semanticContext, + reachesIntoOuterContext: old.reachesIntoOuterContext, + } +} + +func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { + return NewBaseATNConfig5(state, alt, context, SemanticContextNone) +} + +func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Necessary? 
+ } + + return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} +} + +func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { + return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) +} + +func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { + return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) +} + +func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { + return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) +} + +func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { + return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) +} + +func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") + } + + return &BaseATNConfig{ + state: state, + alt: c.GetAlt(), + context: context, + semanticContext: semanticContext, + reachesIntoOuterContext: c.GetReachesIntoOuterContext(), + precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), + } +} + +func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { + return b.precedenceFilterSuppressed +} + +func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { + b.precedenceFilterSuppressed = v +} + +func (b *BaseATNConfig) GetState() ATNState { + return b.state +} + +func (b *BaseATNConfig) GetAlt() int { + return b.alt +} + +func (b *BaseATNConfig) SetContext(v PredictionContext) { + b.context = v +} +func (b *BaseATNConfig) GetContext() PredictionContext { + return b.context +} + +func (b *BaseATNConfig) GetSemanticContext() SemanticContext { + return b.semanticContext +} + +func (b *BaseATNConfig) GetReachesIntoOuterContext() int { + return b.reachesIntoOuterContext +} + +func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { + b.reachesIntoOuterContext = v +} + +// Equals is the default comparison function for an ATNConfig when no specialist implementation is required +// for a collection. +// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. 
+func (b *BaseATNConfig) Equals(o Collectable[ATNConfig]) bool { + if b == o { + return true + } else if o == nil { + return false + } + + var other, ok = o.(*BaseATNConfig) + + if !ok { + return false + } + + var equal bool + + if b.context == nil { + equal = other.context == nil + } else { + equal = b.context.Equals(other.context) + } + + var ( + nums = b.state.GetStateNumber() == other.state.GetStateNumber() + alts = b.alt == other.alt + cons = b.semanticContext.Equals(other.semanticContext) + sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed + ) + + return nums && alts && cons && sups && equal +} + +// Hash is the default hash function for BaseATNConfig, when no specialist hash function +// is required for a collection +func (b *BaseATNConfig) Hash() int { + var c int + if b.context != nil { + c = b.context.Hash() + } + + h := murmurInit(7) + h = murmurUpdate(h, b.state.GetStateNumber()) + h = murmurUpdate(h, b.alt) + h = murmurUpdate(h, c) + h = murmurUpdate(h, b.semanticContext.Hash()) + return murmurFinish(h, 4) +} + +func (b *BaseATNConfig) String() string { + var s1, s2, s3 string + + if b.context != nil { + s1 = ",[" + fmt.Sprint(b.context) + "]" + } + + if b.semanticContext != SemanticContextNone { + s2 = "," + fmt.Sprint(b.semanticContext) + } + + if b.reachesIntoOuterContext > 0 { + s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) + } + + return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) +} + +type LexerATNConfig struct { + *BaseATNConfig + lexerActionExecutor *LexerActionExecutor + passedThroughNonGreedyDecision bool +} + +func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { + return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} +} + +func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), + lexerActionExecutor: lexerActionExecutor, + } +} + +func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), + lexerActionExecutor: c.lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), + lexerActionExecutor: lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), + lexerActionExecutor: c.lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { + return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} +} + +// Hash is the default hash function for LexerATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. 
+func (l *LexerATNConfig) Hash() int { + var f int + if l.passedThroughNonGreedyDecision { + f = 1 + } else { + f = 0 + } + h := murmurInit(7) + h = murmurUpdate(h, l.state.GetStateNumber()) + h = murmurUpdate(h, l.alt) + h = murmurUpdate(h, l.context.Hash()) + h = murmurUpdate(h, l.semanticContext.Hash()) + h = murmurUpdate(h, f) + h = murmurUpdate(h, l.lexerActionExecutor.Hash()) + h = murmurFinish(h, 6) + return h +} + +// Equals is the default comparison function for LexerATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (l *LexerATNConfig) Equals(other Collectable[ATNConfig]) bool { + if l == other { + return true + } + var othert, ok = other.(*LexerATNConfig) + + if l == other { + return true + } else if !ok { + return false + } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { + return false + } + + var b bool + + if l.lexerActionExecutor != nil { + b = !l.lexerActionExecutor.Equals(othert.lexerActionExecutor) + } else { + b = othert.lexerActionExecutor != nil + } + + if b { + return false + } + + return l.BaseATNConfig.Equals(othert.BaseATNConfig) +} + +func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { + var ds, ok = target.(DecisionState) + + return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go index 43e9b33f3b..584cc9cabe 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_config_set.go @@ -1,441 +1,441 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -type ATNConfigSet interface { - Hash() int - Equals(o Collectable[ATNConfig]) bool - Add(ATNConfig, *DoubleDict) bool - AddAll([]ATNConfig) bool - - GetStates() *JStore[ATNState, Comparator[ATNState]] - GetPredicates() []SemanticContext - GetItems() []ATNConfig - - OptimizeConfigs(interpreter *BaseATNSimulator) - - Length() int - IsEmpty() bool - Contains(ATNConfig) bool - ContainsFast(ATNConfig) bool - Clear() - String() string - - HasSemanticContext() bool - SetHasSemanticContext(v bool) - - ReadOnly() bool - SetReadOnly(bool) - - GetConflictingAlts() *BitSet - SetConflictingAlts(*BitSet) - - Alts() *BitSet - - FullContext() bool - - GetUniqueAlt() int - SetUniqueAlt(int) - - GetDipsIntoOuterContext() bool - SetDipsIntoOuterContext(bool) -} - -// BaseATNConfigSet is a specialized set of ATNConfig that tracks information -// about its elements and can combine similar configurations using a -// graph-structured stack. -type BaseATNConfigSet struct { - cachedHash int - - // configLookup is used to determine whether two BaseATNConfigSets are equal. We - // need all configurations with the same (s, i, _, semctx) to be equal. A key - // effectively doubles the number of objects associated with ATNConfigs. All - // keys are hashed by (s, i, _, pi), not including the context. Wiped out when - // read-only because a set becomes a DFA state. - configLookup *JStore[ATNConfig, Comparator[ATNConfig]] - - // configs is the added elements. 
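An ATN configuration's identity, per the Equals/Hash pair in atn_config.go, is the tuple (state number, predicted alt, context, semantic context, precedence-suppression flag). A small sketch of that contract; NewBaseATNConfig6 is taken from the code above, while NewBaseATNState is assumed to be the exported bare-state constructor from this runtime's atn_state.go:

    package main

    import (
        "fmt"

        "github.com/antlr/antlr4/runtime/Go/antlr/v4"
    )

    func main() {
        s := antlr.NewBaseATNState() // bare state; the default state number is fine for hashing

        // Same (state, alt, nil context, SemanticContextNone) => equal, same hash.
        a := antlr.NewBaseATNConfig6(s, 1, nil)
        b := antlr.NewBaseATNConfig6(s, 1, nil)
        fmt.Println(a.Equals(b), a.Hash() == b.Hash()) // true true

        // A different predicted alternative breaks equality.
        c := antlr.NewBaseATNConfig6(s, 2, nil)
        fmt.Println(a.Equals(c)) // false
    }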
- configs []ATNConfig - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? - conflictingAlts *BitSet - - // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates - // we hit a pred while computing a closure operation. Do not make a DFA state - // from the BaseATNConfigSet in this case. TODO: How is this used by parsers? - dipsIntoOuterContext bool - - // fullCtx is whether it is part of a full context LL prediction. Used to - // determine how to merge $. It is a wildcard with SLL, but not for an LL - // context merge. - fullCtx bool - - // Used in parser and lexer. In lexer, it indicates we hit a pred - // while computing a closure operation. Don't make a DFA state from a. - hasSemanticContext bool - - // readOnly is whether it is read-only. Do not - // allow any code to manipulate the set if true because DFA states will point at - // sets and those must not change. It not, protect other fields; conflictingAlts - // in particular, which is assigned after readOnly. - readOnly bool - - // TODO: These fields make me pretty uncomfortable, but it is nice to pack up - // info together because it saves recomputation. Can we track conflicts as they - // are added to save scanning configs later? - uniqueAlt int -} - -func (b *BaseATNConfigSet) Alts() *BitSet { - alts := NewBitSet() - for _, it := range b.configs { - alts.add(it.GetAlt()) - } - return alts -} - -func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet { - return &BaseATNConfigSet{ - cachedHash: -1, - configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst), - fullCtx: fullCtx, - } -} - -// Add merges contexts with existing configs for (s, i, pi, _), where s is the -// ATNConfig.state, i is the ATNConfig.alt, and pi is the -// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates -// dipsIntoOuterContext and hasSemanticContext when necessary. -func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { - if b.readOnly { - panic("set is read-only") - } - - if config.GetSemanticContext() != SemanticContextNone { - b.hasSemanticContext = true - } - - if config.GetReachesIntoOuterContext() > 0 { - b.dipsIntoOuterContext = true - } - - existing, present := b.configLookup.Put(config) - - // The config was not already in the set - // - if !present { - b.cachedHash = -1 - b.configs = append(b.configs, config) // Track order here - return true - } - - // Merge a previous (s, i, pi, _) with it and save the result - rootIsWildcard := !b.fullCtx - merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) - - // No need to check for existing.context because config.context is in the cache, - // since the only way to create new graphs is the "call rule" and here. We cache - // at both places. 
- existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) - - // Preserve the precedence filter suppression during the merge - if config.getPrecedenceFilterSuppressed() { - existing.setPrecedenceFilterSuppressed(true) - } - - // Replace the context because there is no need to do alt mapping - existing.SetContext(merged) - - return true -} - -func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] { - - // states uses the standard comparator provided by the ATNState instance - // - states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst) - - for i := 0; i < len(b.configs); i++ { - states.Put(b.configs[i].GetState()) - } - - return states -} - -func (b *BaseATNConfigSet) HasSemanticContext() bool { - return b.hasSemanticContext -} - -func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) { - b.hasSemanticContext = v -} - -func (b *BaseATNConfigSet) GetPredicates() []SemanticContext { - preds := make([]SemanticContext, 0) - - for i := 0; i < len(b.configs); i++ { - c := b.configs[i].GetSemanticContext() - - if c != SemanticContextNone { - preds = append(preds, c) - } - } - - return preds -} - -func (b *BaseATNConfigSet) GetItems() []ATNConfig { - return b.configs -} - -func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { - if b.readOnly { - panic("set is read-only") - } - - if b.configLookup.Len() == 0 { - return - } - - for i := 0; i < len(b.configs); i++ { - config := b.configs[i] - - config.SetContext(interpreter.getCachedContext(config.GetContext())) - } -} - -func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { - for i := 0; i < len(coll); i++ { - b.Add(coll[i], nil) - } - - return false -} - -// Compare is a hack function just to verify that adding DFAstares to the known -// set works, so long as comparison of ATNConfigSet s works. For that to work, we -// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't -// know the order, so we do this inefficient hack. If this proves the point, then -// we can change the config set to a better structure. 
-func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool { - if len(b.configs) != len(bs.configs) { - return false - } - - for _, c := range b.configs { - found := false - for _, c2 := range bs.configs { - if c.Equals(c2) { - found = true - break - } - } - - if !found { - return false - } - - } - return true -} - -func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool { - if b == other { - return true - } else if _, ok := other.(*BaseATNConfigSet); !ok { - return false - } - - other2 := other.(*BaseATNConfigSet) - - return b.configs != nil && - b.fullCtx == other2.fullCtx && - b.uniqueAlt == other2.uniqueAlt && - b.conflictingAlts == other2.conflictingAlts && - b.hasSemanticContext == other2.hasSemanticContext && - b.dipsIntoOuterContext == other2.dipsIntoOuterContext && - b.Compare(other2) -} - -func (b *BaseATNConfigSet) Hash() int { - if b.readOnly { - if b.cachedHash == -1 { - b.cachedHash = b.hashCodeConfigs() - } - - return b.cachedHash - } - - return b.hashCodeConfigs() -} - -func (b *BaseATNConfigSet) hashCodeConfigs() int { - h := 1 - for _, config := range b.configs { - h = 31*h + config.Hash() - } - return h -} - -func (b *BaseATNConfigSet) Length() int { - return len(b.configs) -} - -func (b *BaseATNConfigSet) IsEmpty() bool { - return len(b.configs) == 0 -} - -func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.Contains(item) -} - -func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { - if b.configLookup == nil { - panic("not implemented for read-only sets") - } - - return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set -} - -func (b *BaseATNConfigSet) Clear() { - if b.readOnly { - panic("set is read-only") - } - - b.configs = make([]ATNConfig, 0) - b.cachedHash = -1 - b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) -} - -func (b *BaseATNConfigSet) FullContext() bool { - return b.fullCtx -} - -func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { - return b.dipsIntoOuterContext -} - -func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { - b.dipsIntoOuterContext = v -} - -func (b *BaseATNConfigSet) GetUniqueAlt() int { - return b.uniqueAlt -} - -func (b *BaseATNConfigSet) SetUniqueAlt(v int) { - b.uniqueAlt = v -} - -func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { - return b.conflictingAlts -} - -func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { - b.conflictingAlts = v -} - -func (b *BaseATNConfigSet) ReadOnly() bool { - return b.readOnly -} - -func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { - b.readOnly = readOnly - - if readOnly { - b.configLookup = nil // Read only, so no need for the lookup cache - } -} - -func (b *BaseATNConfigSet) String() string { - s := "[" - - for i, c := range b.configs { - s += c.String() - - if i != len(b.configs)-1 { - s += ", " - } - } - - s += "]" - - if b.hasSemanticContext { - s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) - } - - if b.uniqueAlt != ATNInvalidAltNumber { - s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) - } - - if b.conflictingAlts != nil { - s += ",conflictingAlts=" + b.conflictingAlts.String() - } - - if b.dipsIntoOuterContext { - s += ",dipsIntoOuterContext" - } - - return s -} - -type OrderedATNConfigSet struct { - *BaseATNConfigSet -} - -func NewOrderedATNConfigSet() *OrderedATNConfigSet { - b := NewBaseATNConfigSet(false) - - // This set uses the standard Hash() and 
Equals() from ATNConfig - b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - - return &OrderedATNConfigSet{BaseATNConfigSet: b} -} - -func hashATNConfig(i interface{}) int { - o := i.(ATNConfig) - hash := 7 - hash = 31*hash + o.GetState().GetStateNumber() - hash = 31*hash + o.GetAlt() - hash = 31*hash + o.GetSemanticContext().Hash() - return hash -} - -func equalATNConfigs(a, b interface{}) bool { - if a == nil || b == nil { - return false - } - - if a == b { - return true - } - - var ai, ok = a.(ATNConfig) - var bi, ok1 = b.(ATNConfig) - - if !ok || !ok1 { - return false - } - - if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() { - return false - } - - if ai.GetAlt() != bi.GetAlt() { - return false - } - - return ai.GetSemanticContext().Equals(bi.GetSemanticContext()) -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +type ATNConfigSet interface { + Hash() int + Equals(o Collectable[ATNConfig]) bool + Add(ATNConfig, *DoubleDict) bool + AddAll([]ATNConfig) bool + + GetStates() *JStore[ATNState, Comparator[ATNState]] + GetPredicates() []SemanticContext + GetItems() []ATNConfig + + OptimizeConfigs(interpreter *BaseATNSimulator) + + Length() int + IsEmpty() bool + Contains(ATNConfig) bool + ContainsFast(ATNConfig) bool + Clear() + String() string + + HasSemanticContext() bool + SetHasSemanticContext(v bool) + + ReadOnly() bool + SetReadOnly(bool) + + GetConflictingAlts() *BitSet + SetConflictingAlts(*BitSet) + + Alts() *BitSet + + FullContext() bool + + GetUniqueAlt() int + SetUniqueAlt(int) + + GetDipsIntoOuterContext() bool + SetDipsIntoOuterContext(bool) +} + +// BaseATNConfigSet is a specialized set of ATNConfig that tracks information +// about its elements and can combine similar configurations using a +// graph-structured stack. +type BaseATNConfigSet struct { + cachedHash int + + // configLookup is used to determine whether two BaseATNConfigSets are equal. We + // need all configurations with the same (s, i, _, semctx) to be equal. A key + // effectively doubles the number of objects associated with ATNConfigs. All + // keys are hashed by (s, i, _, pi), not including the context. Wiped out when + // read-only because a set becomes a DFA state. + configLookup *JStore[ATNConfig, Comparator[ATNConfig]] + + // configs is the added elements. + configs []ATNConfig + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves recomputation. Can we track conflicts as they + // are added to save scanning configs later? + conflictingAlts *BitSet + + // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates + // we hit a pred while computing a closure operation. Do not make a DFA state + // from the BaseATNConfigSet in this case. TODO: How is this used by parsers? + dipsIntoOuterContext bool + + // fullCtx is whether it is part of a full context LL prediction. Used to + // determine how to merge $. It is a wildcard with SLL, but not for an LL + // context merge. + fullCtx bool + + // Used in parser and lexer. In lexer, it indicates we hit a pred + // while computing a closure operation. Don't make a DFA state from a. + hasSemanticContext bool + + // readOnly is whether it is read-only. 
Do not + // allow any code to manipulate the set if true because DFA states will point at + // sets and those must not change. It not, protect other fields; conflictingAlts + // in particular, which is assigned after readOnly. + readOnly bool + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves recomputation. Can we track conflicts as they + // are added to save scanning configs later? + uniqueAlt int +} + +func (b *BaseATNConfigSet) Alts() *BitSet { + alts := NewBitSet() + for _, it := range b.configs { + alts.add(it.GetAlt()) + } + return alts +} + +func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet { + return &BaseATNConfigSet{ + cachedHash: -1, + configLookup: NewJStore[ATNConfig, Comparator[ATNConfig]](aConfCompInst), + fullCtx: fullCtx, + } +} + +// Add merges contexts with existing configs for (s, i, pi, _), where s is the +// ATNConfig.state, i is the ATNConfig.alt, and pi is the +// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates +// dipsIntoOuterContext and hasSemanticContext when necessary. +func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool { + if b.readOnly { + panic("set is read-only") + } + + if config.GetSemanticContext() != SemanticContextNone { + b.hasSemanticContext = true + } + + if config.GetReachesIntoOuterContext() > 0 { + b.dipsIntoOuterContext = true + } + + existing, present := b.configLookup.Put(config) + + // The config was not already in the set + // + if !present { + b.cachedHash = -1 + b.configs = append(b.configs, config) // Track order here + return true + } + + // Merge a previous (s, i, pi, _) with it and save the result + rootIsWildcard := !b.fullCtx + merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache) + + // No need to check for existing.context because config.context is in the cache, + // since the only way to create new graphs is the "call rule" and here. We cache + // at both places. 
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) + + // Preserve the precedence filter suppression during the merge + if config.getPrecedenceFilterSuppressed() { + existing.setPrecedenceFilterSuppressed(true) + } + + // Replace the context because there is no need to do alt mapping + existing.SetContext(merged) + + return true +} + +func (b *BaseATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] { + + // states uses the standard comparator provided by the ATNState instance + // + states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst) + + for i := 0; i < len(b.configs); i++ { + states.Put(b.configs[i].GetState()) + } + + return states +} + +func (b *BaseATNConfigSet) HasSemanticContext() bool { + return b.hasSemanticContext +} + +func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) { + b.hasSemanticContext = v +} + +func (b *BaseATNConfigSet) GetPredicates() []SemanticContext { + preds := make([]SemanticContext, 0) + + for i := 0; i < len(b.configs); i++ { + c := b.configs[i].GetSemanticContext() + + if c != SemanticContextNone { + preds = append(preds, c) + } + } + + return preds +} + +func (b *BaseATNConfigSet) GetItems() []ATNConfig { + return b.configs +} + +func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { + if b.readOnly { + panic("set is read-only") + } + + if b.configLookup.Len() == 0 { + return + } + + for i := 0; i < len(b.configs); i++ { + config := b.configs[i] + + config.SetContext(interpreter.getCachedContext(config.GetContext())) + } +} + +func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { + for i := 0; i < len(coll); i++ { + b.Add(coll[i], nil) + } + + return false +} + +// Compare is a hack function just to verify that adding DFAstares to the known +// set works, so long as comparison of ATNConfigSet s works. For that to work, we +// need to make sure that the set of ATNConfigs in two sets are equivalent. We can't +// know the order, so we do this inefficient hack. If this proves the point, then +// we can change the config set to a better structure. 
+func (b *BaseATNConfigSet) Compare(bs *BaseATNConfigSet) bool { + if len(b.configs) != len(bs.configs) { + return false + } + + for _, c := range b.configs { + found := false + for _, c2 := range bs.configs { + if c.Equals(c2) { + found = true + break + } + } + + if !found { + return false + } + + } + return true +} + +func (b *BaseATNConfigSet) Equals(other Collectable[ATNConfig]) bool { + if b == other { + return true + } else if _, ok := other.(*BaseATNConfigSet); !ok { + return false + } + + other2 := other.(*BaseATNConfigSet) + + return b.configs != nil && + b.fullCtx == other2.fullCtx && + b.uniqueAlt == other2.uniqueAlt && + b.conflictingAlts == other2.conflictingAlts && + b.hasSemanticContext == other2.hasSemanticContext && + b.dipsIntoOuterContext == other2.dipsIntoOuterContext && + b.Compare(other2) +} + +func (b *BaseATNConfigSet) Hash() int { + if b.readOnly { + if b.cachedHash == -1 { + b.cachedHash = b.hashCodeConfigs() + } + + return b.cachedHash + } + + return b.hashCodeConfigs() +} + +func (b *BaseATNConfigSet) hashCodeConfigs() int { + h := 1 + for _, config := range b.configs { + h = 31*h + config.Hash() + } + return h +} + +func (b *BaseATNConfigSet) Length() int { + return len(b.configs) +} + +func (b *BaseATNConfigSet) IsEmpty() bool { + return len(b.configs) == 0 +} + +func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { + if b.configLookup == nil { + panic("not implemented for read-only sets") + } + + return b.configLookup.Contains(item) +} + +func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { + if b.configLookup == nil { + panic("not implemented for read-only sets") + } + + return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set +} + +func (b *BaseATNConfigSet) Clear() { + if b.readOnly { + panic("set is read-only") + } + + b.configs = make([]ATNConfig, 0) + b.cachedHash = -1 + b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) +} + +func (b *BaseATNConfigSet) FullContext() bool { + return b.fullCtx +} + +func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { + return b.dipsIntoOuterContext +} + +func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { + b.dipsIntoOuterContext = v +} + +func (b *BaseATNConfigSet) GetUniqueAlt() int { + return b.uniqueAlt +} + +func (b *BaseATNConfigSet) SetUniqueAlt(v int) { + b.uniqueAlt = v +} + +func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { + return b.conflictingAlts +} + +func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { + b.conflictingAlts = v +} + +func (b *BaseATNConfigSet) ReadOnly() bool { + return b.readOnly +} + +func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { + b.readOnly = readOnly + + if readOnly { + b.configLookup = nil // Read only, so no need for the lookup cache + } +} + +func (b *BaseATNConfigSet) String() string { + s := "[" + + for i, c := range b.configs { + s += c.String() + + if i != len(b.configs)-1 { + s += ", " + } + } + + s += "]" + + if b.hasSemanticContext { + s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) + } + + if b.uniqueAlt != ATNInvalidAltNumber { + s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) + } + + if b.conflictingAlts != nil { + s += ",conflictingAlts=" + b.conflictingAlts.String() + } + + if b.dipsIntoOuterContext { + s += ",dipsIntoOuterContext" + } + + return s +} + +type OrderedATNConfigSet struct { + *BaseATNConfigSet +} + +func NewOrderedATNConfigSet() *OrderedATNConfigSet { + b := NewBaseATNConfigSet(false) + + // This set uses the standard Hash() and 
Equals() from ATNConfig + b.configLookup = NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) + + return &OrderedATNConfigSet{BaseATNConfigSet: b} +} + +func hashATNConfig(i interface{}) int { + o := i.(ATNConfig) + hash := 7 + hash = 31*hash + o.GetState().GetStateNumber() + hash = 31*hash + o.GetAlt() + hash = 31*hash + o.GetSemanticContext().Hash() + return hash +} + +func equalATNConfigs(a, b interface{}) bool { + if a == nil || b == nil { + return false + } + + if a == b { + return true + } + + var ai, ok = a.(ATNConfig) + var bi, ok1 = b.(ATNConfig) + + if !ok || !ok1 { + return false + } + + if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() { + return false + } + + if ai.GetAlt() != bi.GetAlt() { + return false + } + + return ai.GetSemanticContext().Equals(bi.GetSemanticContext()) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go index 3c975ec7bf..f15dfba8d3 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserialization_options.go @@ -1,61 +1,61 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "errors" - -var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false} - -type ATNDeserializationOptions struct { - readOnly bool - verifyATN bool - generateRuleBypassTransitions bool -} - -func (opts *ATNDeserializationOptions) ReadOnly() bool { - return opts.readOnly -} - -func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) { - if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) - } - opts.readOnly = readOnly -} - -func (opts *ATNDeserializationOptions) VerifyATN() bool { - return opts.verifyATN -} - -func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) { - if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) - } - opts.verifyATN = verifyATN -} - -func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool { - return opts.generateRuleBypassTransitions -} - -func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) { - if opts.readOnly { - panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) - } - opts.generateRuleBypassTransitions = generateRuleBypassTransitions -} - -func DefaultATNDeserializationOptions() *ATNDeserializationOptions { - return NewATNDeserializationOptions(&defaultATNDeserializationOptions) -} - -func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions { - o := new(ATNDeserializationOptions) - if other != nil { - *o = *other - o.readOnly = false - } - return o -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
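BaseATNConfigSet.Add keys configurations on (state, alt, semantic context) and, on a key collision, merges the prediction contexts instead of appending a duplicate. A sketch of the non-colliding path (nil contexts keep the sketch free of PredictionContext plumbing; a real collision would go through merge); NewBaseATNState is again an assumed constructor from atn_state.go:

    package main

    import (
        "fmt"

        "github.com/antlr/antlr4/runtime/Go/antlr/v4"
    )

    func main() {
        set := antlr.NewBaseATNConfigSet(false) // SLL-style set (fullCtx = false)
        s := antlr.NewBaseATNState()

        // Distinct alts => distinct (s, i, pi) keys => two tracked configs.
        set.Add(antlr.NewBaseATNConfig6(s, 1, nil), nil)
        set.Add(antlr.NewBaseATNConfig6(s, 2, nil), nil)

        fmt.Println(set.Length(), set.Alts()) // 2 and the alt bitset {1, 2}
    }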
+ +package antlr + +import "errors" + +var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false} + +type ATNDeserializationOptions struct { + readOnly bool + verifyATN bool + generateRuleBypassTransitions bool +} + +func (opts *ATNDeserializationOptions) ReadOnly() bool { + return opts.readOnly +} + +func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) { + if opts.readOnly { + panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + } + opts.readOnly = readOnly +} + +func (opts *ATNDeserializationOptions) VerifyATN() bool { + return opts.verifyATN +} + +func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) { + if opts.readOnly { + panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + } + opts.verifyATN = verifyATN +} + +func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool { + return opts.generateRuleBypassTransitions +} + +func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) { + if opts.readOnly { + panic(errors.New("Cannot mutate read only ATNDeserializationOptions")) + } + opts.generateRuleBypassTransitions = generateRuleBypassTransitions +} + +func DefaultATNDeserializationOptions() *ATNDeserializationOptions { + return NewATNDeserializationOptions(&defaultATNDeserializationOptions) +} + +func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions { + o := new(ATNDeserializationOptions) + if other != nil { + *o = *other + o.readOnly = false + } + return o +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go index 3888856b4b..e2dd68489b 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_deserializer.go @@ -1,683 +1,683 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
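
For orientation, a hedged sketch (not vendored code) of how the ATNDeserializationOptions type above is meant to feed the deserializer whose diff begins here; the payload argument stands in for the int32 data that generated recognizers embed:

// deserializeSketch documents the intended call sequence; it assumes the
// caller supplies a real serialized payload and will panic on an empty one.
func deserializeSketch(payload []int32) *ATN {
	opts := NewATNDeserializationOptions(nil) // fresh mutable options, everything off
	opts.SetVerifyATN(true)                   // re-check ATN invariants after decoding

	return NewATNDeserializer(opts).Deserialize(payload)
}
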
- -package antlr - -import ( - "fmt" - "strconv" -) - -const serializedVersion = 4 - -type loopEndStateIntPair struct { - item0 *LoopEndState - item1 int -} - -type blockStartStateIntPair struct { - item0 BlockStartState - item1 int -} - -type ATNDeserializer struct { - options *ATNDeserializationOptions - data []int32 - pos int -} - -func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { - if options == nil { - options = &defaultATNDeserializationOptions - } - - return &ATNDeserializer{options: options} -} - -func stringInSlice(a string, list []string) int { - for i, b := range list { - if b == a { - return i - } - } - - return -1 -} - -func (a *ATNDeserializer) Deserialize(data []int32) *ATN { - a.data = data - a.pos = 0 - a.checkVersion() - - atn := a.readATN() - - a.readStates(atn) - a.readRules(atn) - a.readModes(atn) - - sets := a.readSets(atn, nil) - - a.readEdges(atn, sets) - a.readDecisions(atn) - a.readLexerActions(atn) - a.markPrecedenceDecisions(atn) - a.verifyATN(atn) - - if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser { - a.generateRuleBypassTransitions(atn) - // Re-verify after modification - a.verifyATN(atn) - } - - return atn - -} - -func (a *ATNDeserializer) checkVersion() { - version := a.readInt() - - if version != serializedVersion { - panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").") - } -} - -func (a *ATNDeserializer) readATN() *ATN { - grammarType := a.readInt() - maxTokenType := a.readInt() - - return NewATN(grammarType, maxTokenType) -} - -func (a *ATNDeserializer) readStates(atn *ATN) { - nstates := a.readInt() - - // Allocate worst case size. - loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates) - endStateNumbers := make([]blockStartStateIntPair, 0, nstates) - - // Preallocate states slice. 
- atn.states = make([]ATNState, 0, nstates) - - for i := 0; i < nstates; i++ { - stype := a.readInt() - - // Ignore bad types of states - if stype == ATNStateInvalidType { - atn.addState(nil) - continue - } - - ruleIndex := a.readInt() - - s := a.stateFactory(stype, ruleIndex) - - if stype == ATNStateLoopEnd { - loopBackStateNumber := a.readInt() - - loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber}) - } else if s2, ok := s.(BlockStartState); ok { - endStateNumber := a.readInt() - - endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber}) - } - - atn.addState(s) - } - - // Delay the assignment of loop back and end states until we know all the state - // instances have been initialized - for _, pair := range loopBackStateNumbers { - pair.item0.loopBackState = atn.states[pair.item1] - } - - for _, pair := range endStateNumbers { - pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState)) - } - - numNonGreedyStates := a.readInt() - for j := 0; j < numNonGreedyStates; j++ { - stateNumber := a.readInt() - - atn.states[stateNumber].(DecisionState).setNonGreedy(true) - } - - numPrecedenceStates := a.readInt() - for j := 0; j < numPrecedenceStates; j++ { - stateNumber := a.readInt() - - atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true - } -} - -func (a *ATNDeserializer) readRules(atn *ATN) { - nrules := a.readInt() - - if atn.grammarType == ATNTypeLexer { - atn.ruleToTokenType = make([]int, nrules) - } - - atn.ruleToStartState = make([]*RuleStartState, nrules) - - for i := range atn.ruleToStartState { - s := a.readInt() - startState := atn.states[s].(*RuleStartState) - - atn.ruleToStartState[i] = startState - - if atn.grammarType == ATNTypeLexer { - tokenType := a.readInt() - - atn.ruleToTokenType[i] = tokenType - } - } - - atn.ruleToStopState = make([]*RuleStopState, nrules) - - for _, state := range atn.states { - if s2, ok := state.(*RuleStopState); ok { - atn.ruleToStopState[s2.ruleIndex] = s2 - atn.ruleToStartState[s2.ruleIndex].stopState = s2 - } - } -} - -func (a *ATNDeserializer) readModes(atn *ATN) { - nmodes := a.readInt() - atn.modeToStartState = make([]*TokensStartState, nmodes) - - for i := range atn.modeToStartState { - s := a.readInt() - - atn.modeToStartState[i] = atn.states[s].(*TokensStartState) - } -} - -func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet { - m := a.readInt() - - // Preallocate the needed capacity. 
- if cap(sets)-len(sets) < m { - isets := make([]*IntervalSet, len(sets), len(sets)+m) - copy(isets, sets) - sets = isets - } - - for i := 0; i < m; i++ { - iset := NewIntervalSet() - - sets = append(sets, iset) - - n := a.readInt() - containsEOF := a.readInt() - - if containsEOF != 0 { - iset.addOne(-1) - } - - for j := 0; j < n; j++ { - i1 := a.readInt() - i2 := a.readInt() - - iset.addRange(i1, i2) - } - } - - return sets -} - -func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) { - nedges := a.readInt() - - for i := 0; i < nedges; i++ { - var ( - src = a.readInt() - trg = a.readInt() - ttype = a.readInt() - arg1 = a.readInt() - arg2 = a.readInt() - arg3 = a.readInt() - trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) - srcState = atn.states[src] - ) - - srcState.AddTransition(trans, -1) - } - - // Edges for rule stop states can be derived, so they are not serialized - for _, state := range atn.states { - for _, t := range state.GetTransitions() { - var rt, ok = t.(*RuleTransition) - - if !ok { - continue - } - - outermostPrecedenceReturn := -1 - - if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule { - if rt.precedence == 0 { - outermostPrecedenceReturn = rt.getTarget().GetRuleIndex() - } - } - - trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn) - - atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1) - } - } - - for _, state := range atn.states { - if s2, ok := state.(BlockStartState); ok { - // We need to know the end state to set its start state - if s2.getEndState() == nil { - panic("IllegalState") - } - - // Block end states can only be associated to a single block start state - if s2.getEndState().startState != nil { - panic("IllegalState") - } - - s2.getEndState().startState = state - } - - if s2, ok := state.(*PlusLoopbackState); ok { - for _, t := range s2.GetTransitions() { - if t2, ok := t.getTarget().(*PlusBlockStartState); ok { - t2.loopBackState = state - } - } - } else if s2, ok := state.(*StarLoopbackState); ok { - for _, t := range s2.GetTransitions() { - if t2, ok := t.getTarget().(*StarLoopEntryState); ok { - t2.loopBackState = state - } - } - } - } -} - -func (a *ATNDeserializer) readDecisions(atn *ATN) { - ndecisions := a.readInt() - - for i := 0; i < ndecisions; i++ { - s := a.readInt() - decState := atn.states[s].(DecisionState) - - atn.DecisionToState = append(atn.DecisionToState, decState) - decState.setDecision(i) - } -} - -func (a *ATNDeserializer) readLexerActions(atn *ATN) { - if atn.grammarType == ATNTypeLexer { - count := a.readInt() - - atn.lexerActions = make([]LexerAction, count) - - for i := range atn.lexerActions { - actionType := a.readInt() - data1 := a.readInt() - data2 := a.readInt() - atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2) - } - } -} - -func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) { - count := len(atn.ruleToStartState) - - for i := 0; i < count; i++ { - atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 - } - - for i := 0; i < count; i++ { - a.generateRuleBypassTransition(atn, i) - } -} - -func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { - bypassStart := NewBasicBlockStartState() - - bypassStart.ruleIndex = idx - atn.addState(bypassStart) - - bypassStop := NewBlockEndState() - - bypassStop.ruleIndex = idx - atn.addState(bypassStop) - - bypassStart.endState = bypassStop - - atn.defineDecisionState(bypassStart.BaseDecisionState) - - bypassStop.startState = bypassStart - - 
var excludeTransition Transition - var endState ATNState - - if atn.ruleToStartState[idx].isPrecedenceRule { - // Wrap from the beginning of the rule to the StarLoopEntryState - endState = nil - - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - if a.stateIsEndStateFor(state, idx) != nil { - endState = state - excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0] - - break - } - } - - if excludeTransition == nil { - panic("Couldn't identify final state of the precedence rule prefix section.") - } - } else { - endState = atn.ruleToStopState[idx] - } - - // All non-excluded transitions that currently target end state need to target - // blockEnd instead - for i := 0; i < len(atn.states); i++ { - state := atn.states[i] - - for j := 0; j < len(state.GetTransitions()); j++ { - transition := state.GetTransitions()[j] - - if transition == excludeTransition { - continue - } - - if transition.getTarget() == endState { - transition.setTarget(bypassStop) - } - } - } - - // All transitions leaving the rule start state need to leave blockStart instead - ruleToStartState := atn.ruleToStartState[idx] - count := len(ruleToStartState.GetTransitions()) - - for count > 0 { - bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1) - ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]}) - } - - // Link the new states - atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1) - bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1) - - MatchState := NewBasicState() - - atn.addState(MatchState) - MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1) - bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1) -} - -func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState { - if state.GetRuleIndex() != idx { - return nil - } - - if _, ok := state.(*StarLoopEntryState); !ok { - return nil - } - - maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() - - if _, ok := maybeLoopEndState.(*LoopEndState); !ok { - return nil - } - - var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) - - if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok { - return state - } - - return nil -} - -// markPrecedenceDecisions analyzes the StarLoopEntryState states in the -// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to -// the correct value. -func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { - for _, state := range atn.states { - if _, ok := state.(*StarLoopEntryState); !ok { - continue - } - - // We analyze the ATN to determine if a ATN decision state is the - // decision for the closure block that determines whether a - // precedence rule should continue or complete. 
- if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { - maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() - - if s3, ok := maybeLoopEndState.(*LoopEndState); ok { - var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) - - if s3.epsilonOnlyTransitions && ok2 { - state.(*StarLoopEntryState).precedenceRuleDecision = true - } - } - } - } -} - -func (a *ATNDeserializer) verifyATN(atn *ATN) { - if !a.options.VerifyATN() { - return - } - - // Verify assumptions - for _, state := range atn.states { - if state == nil { - continue - } - - a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "") - - switch s2 := state.(type) { - case *PlusBlockStartState: - a.checkCondition(s2.loopBackState != nil, "") - - case *StarLoopEntryState: - a.checkCondition(s2.loopBackState != nil, "") - a.checkCondition(len(s2.GetTransitions()) == 2, "") - - switch s2.transitions[0].getTarget().(type) { - case *StarBlockStartState: - _, ok := s2.transitions[1].getTarget().(*LoopEndState) - - a.checkCondition(ok, "") - a.checkCondition(!s2.nonGreedy, "") - - case *LoopEndState: - var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState) - - a.checkCondition(ok, "") - a.checkCondition(s2.nonGreedy, "") - - default: - panic("IllegalState") - } - - case *StarLoopbackState: - a.checkCondition(len(state.GetTransitions()) == 1, "") - - var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState) - - a.checkCondition(ok, "") - - case *LoopEndState: - a.checkCondition(s2.loopBackState != nil, "") - - case *RuleStartState: - a.checkCondition(s2.stopState != nil, "") - - case BlockStartState: - a.checkCondition(s2.getEndState() != nil, "") - - case *BlockEndState: - a.checkCondition(s2.startState != nil, "") - - case DecisionState: - a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "") - - default: - var _, ok = s2.(*RuleStopState) - - a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "") - } - } -} - -func (a *ATNDeserializer) checkCondition(condition bool, message string) { - if !condition { - if message == "" { - message = "IllegalState" - } - - panic(message) - } -} - -func (a *ATNDeserializer) readInt() int { - v := a.data[a.pos] - - a.pos++ - - return int(v) // data is 32 bits but int is at least that big -} - -func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { - target := atn.states[trg] - - switch typeIndex { - case TransitionEPSILON: - return NewEpsilonTransition(target, -1) - - case TransitionRANGE: - if arg3 != 0 { - return NewRangeTransition(target, TokenEOF, arg2) - } - - return NewRangeTransition(target, arg1, arg2) - - case TransitionRULE: - return NewRuleTransition(atn.states[arg1], arg2, arg3, target) - - case TransitionPREDICATE: - return NewPredicateTransition(target, arg1, arg2, arg3 != 0) - - case TransitionPRECEDENCE: - return NewPrecedencePredicateTransition(target, arg1) - - case TransitionATOM: - if arg3 != 0 { - return NewAtomTransition(target, TokenEOF) - } - - return NewAtomTransition(target, arg1) - - case TransitionACTION: - return NewActionTransition(target, arg1, arg2, arg3 != 0) - - case TransitionSET: - return NewSetTransition(target, sets[arg1]) - - case TransitionNOTSET: - return NewNotSetTransition(target, sets[arg1]) - - case TransitionWILDCARD: - return NewWildcardTransition(target) - } - - panic("The specified transition type is not valid.") -} - -func (a 
*ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState { - var s ATNState - - switch typeIndex { - case ATNStateInvalidType: - return nil - - case ATNStateBasic: - s = NewBasicState() - - case ATNStateRuleStart: - s = NewRuleStartState() - - case ATNStateBlockStart: - s = NewBasicBlockStartState() - - case ATNStatePlusBlockStart: - s = NewPlusBlockStartState() - - case ATNStateStarBlockStart: - s = NewStarBlockStartState() - - case ATNStateTokenStart: - s = NewTokensStartState() - - case ATNStateRuleStop: - s = NewRuleStopState() - - case ATNStateBlockEnd: - s = NewBlockEndState() - - case ATNStateStarLoopBack: - s = NewStarLoopbackState() - - case ATNStateStarLoopEntry: - s = NewStarLoopEntryState() - - case ATNStatePlusLoopBack: - s = NewPlusLoopbackState() - - case ATNStateLoopEnd: - s = NewLoopEndState() - - default: - panic(fmt.Sprintf("state type %d is invalid", typeIndex)) - } - - s.SetRuleIndex(ruleIndex) - - return s -} - -func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction { - switch typeIndex { - case LexerActionTypeChannel: - return NewLexerChannelAction(data1) - - case LexerActionTypeCustom: - return NewLexerCustomAction(data1, data2) - - case LexerActionTypeMode: - return NewLexerModeAction(data1) - - case LexerActionTypeMore: - return LexerMoreActionINSTANCE - - case LexerActionTypePopMode: - return LexerPopModeActionINSTANCE - - case LexerActionTypePushMode: - return NewLexerPushModeAction(data1) - - case LexerActionTypeSkip: - return LexerSkipActionINSTANCE - - case LexerActionTypeType: - return NewLexerTypeAction(data1) - - default: - panic(fmt.Sprintf("lexer action %d is invalid", typeIndex)) - } -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
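
Deserialize above fixes the read order of the payload: a version word, then the grammar header, then states, rules, modes, sets, edges, decisions, and lexer actions. A compressed sketch of the first few reads in the same cursor style (illustrative; the real methods do considerably more):

// readHeader mirrors the first reads Deserialize performs: the format
// version, then the grammar type and the maximum token type.
func readHeader(data []int32) (version, grammarType, maxTokenType int) {
	pos := 0
	next := func() int { v := int(data[pos]); pos++; return v }

	version = next()      // must equal serializedVersion (4)
	grammarType = next()  // ATNTypeLexer or ATNTypeParser
	maxTokenType = next() // highest token type used by the grammar
	return
}
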
+ +package antlr + +import ( + "fmt" + "strconv" +) + +const serializedVersion = 4 + +type loopEndStateIntPair struct { + item0 *LoopEndState + item1 int +} + +type blockStartStateIntPair struct { + item0 BlockStartState + item1 int +} + +type ATNDeserializer struct { + options *ATNDeserializationOptions + data []int32 + pos int +} + +func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { + if options == nil { + options = &defaultATNDeserializationOptions + } + + return &ATNDeserializer{options: options} +} + +func stringInSlice(a string, list []string) int { + for i, b := range list { + if b == a { + return i + } + } + + return -1 +} + +func (a *ATNDeserializer) Deserialize(data []int32) *ATN { + a.data = data + a.pos = 0 + a.checkVersion() + + atn := a.readATN() + + a.readStates(atn) + a.readRules(atn) + a.readModes(atn) + + sets := a.readSets(atn, nil) + + a.readEdges(atn, sets) + a.readDecisions(atn) + a.readLexerActions(atn) + a.markPrecedenceDecisions(atn) + a.verifyATN(atn) + + if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser { + a.generateRuleBypassTransitions(atn) + // Re-verify after modification + a.verifyATN(atn) + } + + return atn + +} + +func (a *ATNDeserializer) checkVersion() { + version := a.readInt() + + if version != serializedVersion { + panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").") + } +} + +func (a *ATNDeserializer) readATN() *ATN { + grammarType := a.readInt() + maxTokenType := a.readInt() + + return NewATN(grammarType, maxTokenType) +} + +func (a *ATNDeserializer) readStates(atn *ATN) { + nstates := a.readInt() + + // Allocate worst case size. + loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates) + endStateNumbers := make([]blockStartStateIntPair, 0, nstates) + + // Preallocate states slice. 
+ atn.states = make([]ATNState, 0, nstates) + + for i := 0; i < nstates; i++ { + stype := a.readInt() + + // Ignore bad types of states + if stype == ATNStateInvalidType { + atn.addState(nil) + continue + } + + ruleIndex := a.readInt() + + s := a.stateFactory(stype, ruleIndex) + + if stype == ATNStateLoopEnd { + loopBackStateNumber := a.readInt() + + loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber}) + } else if s2, ok := s.(BlockStartState); ok { + endStateNumber := a.readInt() + + endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber}) + } + + atn.addState(s) + } + + // Delay the assignment of loop back and end states until we know all the state + // instances have been initialized + for _, pair := range loopBackStateNumbers { + pair.item0.loopBackState = atn.states[pair.item1] + } + + for _, pair := range endStateNumbers { + pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState)) + } + + numNonGreedyStates := a.readInt() + for j := 0; j < numNonGreedyStates; j++ { + stateNumber := a.readInt() + + atn.states[stateNumber].(DecisionState).setNonGreedy(true) + } + + numPrecedenceStates := a.readInt() + for j := 0; j < numPrecedenceStates; j++ { + stateNumber := a.readInt() + + atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true + } +} + +func (a *ATNDeserializer) readRules(atn *ATN) { + nrules := a.readInt() + + if atn.grammarType == ATNTypeLexer { + atn.ruleToTokenType = make([]int, nrules) + } + + atn.ruleToStartState = make([]*RuleStartState, nrules) + + for i := range atn.ruleToStartState { + s := a.readInt() + startState := atn.states[s].(*RuleStartState) + + atn.ruleToStartState[i] = startState + + if atn.grammarType == ATNTypeLexer { + tokenType := a.readInt() + + atn.ruleToTokenType[i] = tokenType + } + } + + atn.ruleToStopState = make([]*RuleStopState, nrules) + + for _, state := range atn.states { + if s2, ok := state.(*RuleStopState); ok { + atn.ruleToStopState[s2.ruleIndex] = s2 + atn.ruleToStartState[s2.ruleIndex].stopState = s2 + } + } +} + +func (a *ATNDeserializer) readModes(atn *ATN) { + nmodes := a.readInt() + atn.modeToStartState = make([]*TokensStartState, nmodes) + + for i := range atn.modeToStartState { + s := a.readInt() + + atn.modeToStartState[i] = atn.states[s].(*TokensStartState) + } +} + +func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet { + m := a.readInt() + + // Preallocate the needed capacity. 
+ if cap(sets)-len(sets) < m { + isets := make([]*IntervalSet, len(sets), len(sets)+m) + copy(isets, sets) + sets = isets + } + + for i := 0; i < m; i++ { + iset := NewIntervalSet() + + sets = append(sets, iset) + + n := a.readInt() + containsEOF := a.readInt() + + if containsEOF != 0 { + iset.addOne(-1) + } + + for j := 0; j < n; j++ { + i1 := a.readInt() + i2 := a.readInt() + + iset.addRange(i1, i2) + } + } + + return sets +} + +func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) { + nedges := a.readInt() + + for i := 0; i < nedges; i++ { + var ( + src = a.readInt() + trg = a.readInt() + ttype = a.readInt() + arg1 = a.readInt() + arg2 = a.readInt() + arg3 = a.readInt() + trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) + srcState = atn.states[src] + ) + + srcState.AddTransition(trans, -1) + } + + // Edges for rule stop states can be derived, so they are not serialized + for _, state := range atn.states { + for _, t := range state.GetTransitions() { + var rt, ok = t.(*RuleTransition) + + if !ok { + continue + } + + outermostPrecedenceReturn := -1 + + if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule { + if rt.precedence == 0 { + outermostPrecedenceReturn = rt.getTarget().GetRuleIndex() + } + } + + trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn) + + atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1) + } + } + + for _, state := range atn.states { + if s2, ok := state.(BlockStartState); ok { + // We need to know the end state to set its start state + if s2.getEndState() == nil { + panic("IllegalState") + } + + // Block end states can only be associated to a single block start state + if s2.getEndState().startState != nil { + panic("IllegalState") + } + + s2.getEndState().startState = state + } + + if s2, ok := state.(*PlusLoopbackState); ok { + for _, t := range s2.GetTransitions() { + if t2, ok := t.getTarget().(*PlusBlockStartState); ok { + t2.loopBackState = state + } + } + } else if s2, ok := state.(*StarLoopbackState); ok { + for _, t := range s2.GetTransitions() { + if t2, ok := t.getTarget().(*StarLoopEntryState); ok { + t2.loopBackState = state + } + } + } + } +} + +func (a *ATNDeserializer) readDecisions(atn *ATN) { + ndecisions := a.readInt() + + for i := 0; i < ndecisions; i++ { + s := a.readInt() + decState := atn.states[s].(DecisionState) + + atn.DecisionToState = append(atn.DecisionToState, decState) + decState.setDecision(i) + } +} + +func (a *ATNDeserializer) readLexerActions(atn *ATN) { + if atn.grammarType == ATNTypeLexer { + count := a.readInt() + + atn.lexerActions = make([]LexerAction, count) + + for i := range atn.lexerActions { + actionType := a.readInt() + data1 := a.readInt() + data2 := a.readInt() + atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2) + } + } +} + +func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) { + count := len(atn.ruleToStartState) + + for i := 0; i < count; i++ { + atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 + } + + for i := 0; i < count; i++ { + a.generateRuleBypassTransition(atn, i) + } +} + +func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { + bypassStart := NewBasicBlockStartState() + + bypassStart.ruleIndex = idx + atn.addState(bypassStart) + + bypassStop := NewBlockEndState() + + bypassStop.ruleIndex = idx + atn.addState(bypassStop) + + bypassStart.endState = bypassStop + + atn.defineDecisionState(bypassStart.BaseDecisionState) + + bypassStop.startState = bypassStart + + 
var excludeTransition Transition
+	var endState ATNState
+
+	if atn.ruleToStartState[idx].isPrecedenceRule {
+		// Wrap from the beginning of the rule to the StarLoopEntryState
+		endState = nil
+
+		for i := 0; i < len(atn.states); i++ {
+			state := atn.states[i]
+
+			if a.stateIsEndStateFor(state, idx) != nil {
+				endState = state
+				excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
+
+				break
+			}
+		}
+
+		if excludeTransition == nil {
+			panic("Couldn't identify final state of the precedence rule prefix section.")
+		}
+	} else {
+		endState = atn.ruleToStopState[idx]
+	}
+
+	// All non-excluded transitions that currently target end state need to target
+	// blockEnd instead
+	for i := 0; i < len(atn.states); i++ {
+		state := atn.states[i]
+
+		for j := 0; j < len(state.GetTransitions()); j++ {
+			transition := state.GetTransitions()[j]
+
+			if transition == excludeTransition {
+				continue
+			}
+
+			if transition.getTarget() == endState {
+				transition.setTarget(bypassStop)
+			}
+		}
+	}
+
+	// All transitions leaving the rule start state need to leave blockStart instead.
+	// Move them over one at a time, shrinking the source slice so the loop terminates.
+	ruleToStartState := atn.ruleToStartState[idx]
+	count := len(ruleToStartState.GetTransitions())
+
+	for count > 0 {
+		bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
+		ruleToStartState.SetTransitions(ruleToStartState.GetTransitions()[:count-1])
+		count--
+	}
+
+	// Link the new states
+	atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
+	bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
+
+	MatchState := NewBasicState()
+
+	atn.addState(MatchState)
+	MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
+	bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
+}
+
+func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
+	if state.GetRuleIndex() != idx {
+		return nil
+	}
+
+	if _, ok := state.(*StarLoopEntryState); !ok {
+		return nil
+	}
+
+	maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+	if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
+		return nil
+	}
+
+	var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+	if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
+		return state
+	}
+
+	return nil
+}
+
+// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
+// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
+// the correct value.
+func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
+	for _, state := range atn.states {
+		if _, ok := state.(*StarLoopEntryState); !ok {
+			continue
+		}
+
+		// We analyze the ATN to determine if an ATN decision state is the
+		// decision for the closure block that determines whether a
+		// precedence rule should continue or complete.
+ if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { + maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() + + if s3, ok := maybeLoopEndState.(*LoopEndState); ok { + var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) + + if s3.epsilonOnlyTransitions && ok2 { + state.(*StarLoopEntryState).precedenceRuleDecision = true + } + } + } + } +} + +func (a *ATNDeserializer) verifyATN(atn *ATN) { + if !a.options.VerifyATN() { + return + } + + // Verify assumptions + for _, state := range atn.states { + if state == nil { + continue + } + + a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "") + + switch s2 := state.(type) { + case *PlusBlockStartState: + a.checkCondition(s2.loopBackState != nil, "") + + case *StarLoopEntryState: + a.checkCondition(s2.loopBackState != nil, "") + a.checkCondition(len(s2.GetTransitions()) == 2, "") + + switch s2.transitions[0].getTarget().(type) { + case *StarBlockStartState: + _, ok := s2.transitions[1].getTarget().(*LoopEndState) + + a.checkCondition(ok, "") + a.checkCondition(!s2.nonGreedy, "") + + case *LoopEndState: + var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState) + + a.checkCondition(ok, "") + a.checkCondition(s2.nonGreedy, "") + + default: + panic("IllegalState") + } + + case *StarLoopbackState: + a.checkCondition(len(state.GetTransitions()) == 1, "") + + var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState) + + a.checkCondition(ok, "") + + case *LoopEndState: + a.checkCondition(s2.loopBackState != nil, "") + + case *RuleStartState: + a.checkCondition(s2.stopState != nil, "") + + case BlockStartState: + a.checkCondition(s2.getEndState() != nil, "") + + case *BlockEndState: + a.checkCondition(s2.startState != nil, "") + + case DecisionState: + a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "") + + default: + var _, ok = s2.(*RuleStopState) + + a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "") + } + } +} + +func (a *ATNDeserializer) checkCondition(condition bool, message string) { + if !condition { + if message == "" { + message = "IllegalState" + } + + panic(message) + } +} + +func (a *ATNDeserializer) readInt() int { + v := a.data[a.pos] + + a.pos++ + + return int(v) // data is 32 bits but int is at least that big +} + +func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { + target := atn.states[trg] + + switch typeIndex { + case TransitionEPSILON: + return NewEpsilonTransition(target, -1) + + case TransitionRANGE: + if arg3 != 0 { + return NewRangeTransition(target, TokenEOF, arg2) + } + + return NewRangeTransition(target, arg1, arg2) + + case TransitionRULE: + return NewRuleTransition(atn.states[arg1], arg2, arg3, target) + + case TransitionPREDICATE: + return NewPredicateTransition(target, arg1, arg2, arg3 != 0) + + case TransitionPRECEDENCE: + return NewPrecedencePredicateTransition(target, arg1) + + case TransitionATOM: + if arg3 != 0 { + return NewAtomTransition(target, TokenEOF) + } + + return NewAtomTransition(target, arg1) + + case TransitionACTION: + return NewActionTransition(target, arg1, arg2, arg3 != 0) + + case TransitionSET: + return NewSetTransition(target, sets[arg1]) + + case TransitionNOTSET: + return NewNotSetTransition(target, sets[arg1]) + + case TransitionWILDCARD: + return NewWildcardTransition(target) + } + + panic("The specified transition type is not valid.") +} + +func (a 
*ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState { + var s ATNState + + switch typeIndex { + case ATNStateInvalidType: + return nil + + case ATNStateBasic: + s = NewBasicState() + + case ATNStateRuleStart: + s = NewRuleStartState() + + case ATNStateBlockStart: + s = NewBasicBlockStartState() + + case ATNStatePlusBlockStart: + s = NewPlusBlockStartState() + + case ATNStateStarBlockStart: + s = NewStarBlockStartState() + + case ATNStateTokenStart: + s = NewTokensStartState() + + case ATNStateRuleStop: + s = NewRuleStopState() + + case ATNStateBlockEnd: + s = NewBlockEndState() + + case ATNStateStarLoopBack: + s = NewStarLoopbackState() + + case ATNStateStarLoopEntry: + s = NewStarLoopEntryState() + + case ATNStatePlusLoopBack: + s = NewPlusLoopbackState() + + case ATNStateLoopEnd: + s = NewLoopEndState() + + default: + panic(fmt.Sprintf("state type %d is invalid", typeIndex)) + } + + s.SetRuleIndex(ruleIndex) + + return s +} + +func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction { + switch typeIndex { + case LexerActionTypeChannel: + return NewLexerChannelAction(data1) + + case LexerActionTypeCustom: + return NewLexerCustomAction(data1, data2) + + case LexerActionTypeMode: + return NewLexerModeAction(data1) + + case LexerActionTypeMore: + return LexerMoreActionINSTANCE + + case LexerActionTypePopMode: + return LexerPopModeActionINSTANCE + + case LexerActionTypePushMode: + return NewLexerPushModeAction(data1) + + case LexerActionTypeSkip: + return LexerSkipActionINSTANCE + + case LexerActionTypeType: + return NewLexerTypeAction(data1) + + default: + panic(fmt.Sprintf("lexer action %d is invalid", typeIndex)) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go index 41529115fa..a353be7bb6 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_simulator.go @@ -1,50 +1,50 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) - -type IATNSimulator interface { - SharedContextCache() *PredictionContextCache - ATN() *ATN - DecisionToDFA() []*DFA -} - -type BaseATNSimulator struct { - atn *ATN - sharedContextCache *PredictionContextCache - decisionToDFA []*DFA -} - -func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { - b := new(BaseATNSimulator) - - b.atn = atn - b.sharedContextCache = sharedContextCache - - return b -} - -func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { - if b.sharedContextCache == nil { - return context - } - - visited := make(map[PredictionContext]PredictionContext) - - return getCachedBasePredictionContext(context, b.sharedContextCache, visited) -} - -func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { - return b.sharedContextCache -} - -func (b *BaseATNSimulator) ATN() *ATN { - return b.atn -} - -func (b *BaseATNSimulator) DecisionToDFA() []*DFA { - return b.decisionToDFA -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) + +type IATNSimulator interface { + SharedContextCache() *PredictionContextCache + ATN() *ATN + DecisionToDFA() []*DFA +} + +type BaseATNSimulator struct { + atn *ATN + sharedContextCache *PredictionContextCache + decisionToDFA []*DFA +} + +func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { + b := new(BaseATNSimulator) + + b.atn = atn + b.sharedContextCache = sharedContextCache + + return b +} + +func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { + if b.sharedContextCache == nil { + return context + } + + visited := make(map[PredictionContext]PredictionContext) + + return getCachedBasePredictionContext(context, b.sharedContextCache, visited) +} + +func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { + return b.sharedContextCache +} + +func (b *BaseATNSimulator) ATN() *ATN { + return b.atn +} + +func (b *BaseATNSimulator) DecisionToDFA() []*DFA { + return b.decisionToDFA +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go index 1f2a56bc31..2a42fc2278 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_state.go @@ -1,393 +1,393 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "strconv" - -// Constants for serialization. -const ( - ATNStateInvalidType = 0 - ATNStateBasic = 1 - ATNStateRuleStart = 2 - ATNStateBlockStart = 3 - ATNStatePlusBlockStart = 4 - ATNStateStarBlockStart = 5 - ATNStateTokenStart = 6 - ATNStateRuleStop = 7 - ATNStateBlockEnd = 8 - ATNStateStarLoopBack = 9 - ATNStateStarLoopEntry = 10 - ATNStatePlusLoopBack = 11 - ATNStateLoopEnd = 12 - - ATNStateInvalidStateNumber = -1 -) - -var ATNStateInitialNumTransitions = 4 - -type ATNState interface { - GetEpsilonOnlyTransitions() bool - - GetRuleIndex() int - SetRuleIndex(int) - - GetNextTokenWithinRule() *IntervalSet - SetNextTokenWithinRule(*IntervalSet) - - GetATN() *ATN - SetATN(*ATN) - - GetStateType() int - - GetStateNumber() int - SetStateNumber(int) - - GetTransitions() []Transition - SetTransitions([]Transition) - AddTransition(Transition, int) - - String() string - Hash() int - Equals(Collectable[ATNState]) bool -} - -type BaseATNState struct { - // NextTokenWithinRule caches lookahead during parsing. Not used during construction. - NextTokenWithinRule *IntervalSet - - // atn is the current ATN. - atn *ATN - - epsilonOnlyTransitions bool - - // ruleIndex tracks the Rule index because there are no Rule objects at runtime. - ruleIndex int - - stateNumber int - - stateType int - - // Track the transitions emanating from this ATN state. 
- transitions []Transition -} - -func NewBaseATNState() *BaseATNState { - return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} -} - -func (as *BaseATNState) GetRuleIndex() int { - return as.ruleIndex -} - -func (as *BaseATNState) SetRuleIndex(v int) { - as.ruleIndex = v -} -func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { - return as.epsilonOnlyTransitions -} - -func (as *BaseATNState) GetATN() *ATN { - return as.atn -} - -func (as *BaseATNState) SetATN(atn *ATN) { - as.atn = atn -} - -func (as *BaseATNState) GetTransitions() []Transition { - return as.transitions -} - -func (as *BaseATNState) SetTransitions(t []Transition) { - as.transitions = t -} - -func (as *BaseATNState) GetStateType() int { - return as.stateType -} - -func (as *BaseATNState) GetStateNumber() int { - return as.stateNumber -} - -func (as *BaseATNState) SetStateNumber(stateNumber int) { - as.stateNumber = stateNumber -} - -func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { - return as.NextTokenWithinRule -} - -func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { - as.NextTokenWithinRule = v -} - -func (as *BaseATNState) Hash() int { - return as.stateNumber -} - -func (as *BaseATNState) String() string { - return strconv.Itoa(as.stateNumber) -} - -func (as *BaseATNState) Equals(other Collectable[ATNState]) bool { - if ot, ok := other.(ATNState); ok { - return as.stateNumber == ot.GetStateNumber() - } - - return false -} - -func (as *BaseATNState) isNonGreedyExitState() bool { - return false -} - -func (as *BaseATNState) AddTransition(trans Transition, index int) { - if len(as.transitions) == 0 { - as.epsilonOnlyTransitions = trans.getIsEpsilon() - } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { - as.epsilonOnlyTransitions = false - } - - if index == -1 { - as.transitions = append(as.transitions, trans) - } else { - as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) - // TODO: as.transitions.splice(index, 1, trans) - } -} - -type BasicState struct { - *BaseATNState -} - -func NewBasicState() *BasicState { - b := NewBaseATNState() - - b.stateType = ATNStateBasic - - return &BasicState{BaseATNState: b} -} - -type DecisionState interface { - ATNState - - getDecision() int - setDecision(int) - - getNonGreedy() bool - setNonGreedy(bool) -} - -type BaseDecisionState struct { - *BaseATNState - decision int - nonGreedy bool -} - -func NewBaseDecisionState() *BaseDecisionState { - return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1} -} - -func (s *BaseDecisionState) getDecision() int { - return s.decision -} - -func (s *BaseDecisionState) setDecision(b int) { - s.decision = b -} - -func (s *BaseDecisionState) getNonGreedy() bool { - return s.nonGreedy -} - -func (s *BaseDecisionState) setNonGreedy(b bool) { - s.nonGreedy = b -} - -type BlockStartState interface { - DecisionState - - getEndState() *BlockEndState - setEndState(*BlockEndState) -} - -// BaseBlockStartState is the start of a regular (...) block. 
-type BaseBlockStartState struct { - *BaseDecisionState - endState *BlockEndState -} - -func NewBlockStartState() *BaseBlockStartState { - return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} -} - -func (s *BaseBlockStartState) getEndState() *BlockEndState { - return s.endState -} - -func (s *BaseBlockStartState) setEndState(b *BlockEndState) { - s.endState = b -} - -type BasicBlockStartState struct { - *BaseBlockStartState -} - -func NewBasicBlockStartState() *BasicBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateBlockStart - - return &BasicBlockStartState{BaseBlockStartState: b} -} - -var _ BlockStartState = &BasicBlockStartState{} - -// BlockEndState is a terminal node of a simple (a|b|c) block. -type BlockEndState struct { - *BaseATNState - startState ATNState -} - -func NewBlockEndState() *BlockEndState { - b := NewBaseATNState() - - b.stateType = ATNStateBlockEnd - - return &BlockEndState{BaseATNState: b} -} - -// RuleStopState is the last node in the ATN for a rule, unless that rule is the -// start symbol. In that case, there is one transition to EOF. Later, we might -// encode references to all calls to this rule to compute FOLLOW sets for error -// handling. -type RuleStopState struct { - *BaseATNState -} - -func NewRuleStopState() *RuleStopState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStop - - return &RuleStopState{BaseATNState: b} -} - -type RuleStartState struct { - *BaseATNState - stopState ATNState - isPrecedenceRule bool -} - -func NewRuleStartState() *RuleStartState { - b := NewBaseATNState() - - b.stateType = ATNStateRuleStart - - return &RuleStartState{BaseATNState: b} -} - -// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two -// transitions: one to the loop back to start of the block, and one to exit. -type PlusLoopbackState struct { - *BaseDecisionState -} - -func NewPlusLoopbackState() *PlusLoopbackState { - b := NewBaseDecisionState() - - b.stateType = ATNStatePlusLoopBack - - return &PlusLoopbackState{BaseDecisionState: b} -} - -// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a -// decision state; we don't use it for code generation. Somebody might need it, -// it is included for completeness. In reality, PlusLoopbackState is the real -// decision-making node for A+. -type PlusBlockStartState struct { - *BaseBlockStartState - loopBackState ATNState -} - -func NewPlusBlockStartState() *PlusBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStatePlusBlockStart - - return &PlusBlockStartState{BaseBlockStartState: b} -} - -var _ BlockStartState = &PlusBlockStartState{} - -// StarBlockStartState is the block that begins a closure loop. 
-type StarBlockStartState struct { - *BaseBlockStartState -} - -func NewStarBlockStartState() *StarBlockStartState { - b := NewBlockStartState() - - b.stateType = ATNStateStarBlockStart - - return &StarBlockStartState{BaseBlockStartState: b} -} - -var _ BlockStartState = &StarBlockStartState{} - -type StarLoopbackState struct { - *BaseATNState -} - -func NewStarLoopbackState() *StarLoopbackState { - b := NewBaseATNState() - - b.stateType = ATNStateStarLoopBack - - return &StarLoopbackState{BaseATNState: b} -} - -type StarLoopEntryState struct { - *BaseDecisionState - loopBackState ATNState - precedenceRuleDecision bool -} - -func NewStarLoopEntryState() *StarLoopEntryState { - b := NewBaseDecisionState() - - b.stateType = ATNStateStarLoopEntry - - // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. - return &StarLoopEntryState{BaseDecisionState: b} -} - -// LoopEndState marks the end of a * or + loop. -type LoopEndState struct { - *BaseATNState - loopBackState ATNState -} - -func NewLoopEndState() *LoopEndState { - b := NewBaseATNState() - - b.stateType = ATNStateLoopEnd - - return &LoopEndState{BaseATNState: b} -} - -// TokensStartState is the Tokens rule start state linking to each lexer rule start state. -type TokensStartState struct { - *BaseDecisionState -} - -func NewTokensStartState() *TokensStartState { - b := NewBaseDecisionState() - - b.stateType = ATNStateTokenStart - - return &TokensStartState{BaseDecisionState: b} -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "strconv" + +// Constants for serialization. +const ( + ATNStateInvalidType = 0 + ATNStateBasic = 1 + ATNStateRuleStart = 2 + ATNStateBlockStart = 3 + ATNStatePlusBlockStart = 4 + ATNStateStarBlockStart = 5 + ATNStateTokenStart = 6 + ATNStateRuleStop = 7 + ATNStateBlockEnd = 8 + ATNStateStarLoopBack = 9 + ATNStateStarLoopEntry = 10 + ATNStatePlusLoopBack = 11 + ATNStateLoopEnd = 12 + + ATNStateInvalidStateNumber = -1 +) + +var ATNStateInitialNumTransitions = 4 + +type ATNState interface { + GetEpsilonOnlyTransitions() bool + + GetRuleIndex() int + SetRuleIndex(int) + + GetNextTokenWithinRule() *IntervalSet + SetNextTokenWithinRule(*IntervalSet) + + GetATN() *ATN + SetATN(*ATN) + + GetStateType() int + + GetStateNumber() int + SetStateNumber(int) + + GetTransitions() []Transition + SetTransitions([]Transition) + AddTransition(Transition, int) + + String() string + Hash() int + Equals(Collectable[ATNState]) bool +} + +type BaseATNState struct { + // NextTokenWithinRule caches lookahead during parsing. Not used during construction. + NextTokenWithinRule *IntervalSet + + // atn is the current ATN. + atn *ATN + + epsilonOnlyTransitions bool + + // ruleIndex tracks the Rule index because there are no Rule objects at runtime. + ruleIndex int + + stateNumber int + + stateType int + + // Track the transitions emanating from this ATN state. 
+ transitions []Transition +} + +func NewBaseATNState() *BaseATNState { + return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} +} + +func (as *BaseATNState) GetRuleIndex() int { + return as.ruleIndex +} + +func (as *BaseATNState) SetRuleIndex(v int) { + as.ruleIndex = v +} +func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { + return as.epsilonOnlyTransitions +} + +func (as *BaseATNState) GetATN() *ATN { + return as.atn +} + +func (as *BaseATNState) SetATN(atn *ATN) { + as.atn = atn +} + +func (as *BaseATNState) GetTransitions() []Transition { + return as.transitions +} + +func (as *BaseATNState) SetTransitions(t []Transition) { + as.transitions = t +} + +func (as *BaseATNState) GetStateType() int { + return as.stateType +} + +func (as *BaseATNState) GetStateNumber() int { + return as.stateNumber +} + +func (as *BaseATNState) SetStateNumber(stateNumber int) { + as.stateNumber = stateNumber +} + +func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { + return as.NextTokenWithinRule +} + +func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { + as.NextTokenWithinRule = v +} + +func (as *BaseATNState) Hash() int { + return as.stateNumber +} + +func (as *BaseATNState) String() string { + return strconv.Itoa(as.stateNumber) +} + +func (as *BaseATNState) Equals(other Collectable[ATNState]) bool { + if ot, ok := other.(ATNState); ok { + return as.stateNumber == ot.GetStateNumber() + } + + return false +} + +func (as *BaseATNState) isNonGreedyExitState() bool { + return false +} + +func (as *BaseATNState) AddTransition(trans Transition, index int) { + if len(as.transitions) == 0 { + as.epsilonOnlyTransitions = trans.getIsEpsilon() + } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { + as.epsilonOnlyTransitions = false + } + + if index == -1 { + as.transitions = append(as.transitions, trans) + } else { + as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) + // TODO: as.transitions.splice(index, 1, trans) + } +} + +type BasicState struct { + *BaseATNState +} + +func NewBasicState() *BasicState { + b := NewBaseATNState() + + b.stateType = ATNStateBasic + + return &BasicState{BaseATNState: b} +} + +type DecisionState interface { + ATNState + + getDecision() int + setDecision(int) + + getNonGreedy() bool + setNonGreedy(bool) +} + +type BaseDecisionState struct { + *BaseATNState + decision int + nonGreedy bool +} + +func NewBaseDecisionState() *BaseDecisionState { + return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1} +} + +func (s *BaseDecisionState) getDecision() int { + return s.decision +} + +func (s *BaseDecisionState) setDecision(b int) { + s.decision = b +} + +func (s *BaseDecisionState) getNonGreedy() bool { + return s.nonGreedy +} + +func (s *BaseDecisionState) setNonGreedy(b bool) { + s.nonGreedy = b +} + +type BlockStartState interface { + DecisionState + + getEndState() *BlockEndState + setEndState(*BlockEndState) +} + +// BaseBlockStartState is the start of a regular (...) block. 
+type BaseBlockStartState struct { + *BaseDecisionState + endState *BlockEndState +} + +func NewBlockStartState() *BaseBlockStartState { + return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} +} + +func (s *BaseBlockStartState) getEndState() *BlockEndState { + return s.endState +} + +func (s *BaseBlockStartState) setEndState(b *BlockEndState) { + s.endState = b +} + +type BasicBlockStartState struct { + *BaseBlockStartState +} + +func NewBasicBlockStartState() *BasicBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStateBlockStart + + return &BasicBlockStartState{BaseBlockStartState: b} +} + +var _ BlockStartState = &BasicBlockStartState{} + +// BlockEndState is a terminal node of a simple (a|b|c) block. +type BlockEndState struct { + *BaseATNState + startState ATNState +} + +func NewBlockEndState() *BlockEndState { + b := NewBaseATNState() + + b.stateType = ATNStateBlockEnd + + return &BlockEndState{BaseATNState: b} +} + +// RuleStopState is the last node in the ATN for a rule, unless that rule is the +// start symbol. In that case, there is one transition to EOF. Later, we might +// encode references to all calls to this rule to compute FOLLOW sets for error +// handling. +type RuleStopState struct { + *BaseATNState +} + +func NewRuleStopState() *RuleStopState { + b := NewBaseATNState() + + b.stateType = ATNStateRuleStop + + return &RuleStopState{BaseATNState: b} +} + +type RuleStartState struct { + *BaseATNState + stopState ATNState + isPrecedenceRule bool +} + +func NewRuleStartState() *RuleStartState { + b := NewBaseATNState() + + b.stateType = ATNStateRuleStart + + return &RuleStartState{BaseATNState: b} +} + +// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two +// transitions: one to the loop back to start of the block, and one to exit. +type PlusLoopbackState struct { + *BaseDecisionState +} + +func NewPlusLoopbackState() *PlusLoopbackState { + b := NewBaseDecisionState() + + b.stateType = ATNStatePlusLoopBack + + return &PlusLoopbackState{BaseDecisionState: b} +} + +// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a +// decision state; we don't use it for code generation. Somebody might need it, +// it is included for completeness. In reality, PlusLoopbackState is the real +// decision-making node for A+. +type PlusBlockStartState struct { + *BaseBlockStartState + loopBackState ATNState +} + +func NewPlusBlockStartState() *PlusBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStatePlusBlockStart + + return &PlusBlockStartState{BaseBlockStartState: b} +} + +var _ BlockStartState = &PlusBlockStartState{} + +// StarBlockStartState is the block that begins a closure loop. 
+type StarBlockStartState struct { + *BaseBlockStartState +} + +func NewStarBlockStartState() *StarBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStateStarBlockStart + + return &StarBlockStartState{BaseBlockStartState: b} +} + +var _ BlockStartState = &StarBlockStartState{} + +type StarLoopbackState struct { + *BaseATNState +} + +func NewStarLoopbackState() *StarLoopbackState { + b := NewBaseATNState() + + b.stateType = ATNStateStarLoopBack + + return &StarLoopbackState{BaseATNState: b} +} + +type StarLoopEntryState struct { + *BaseDecisionState + loopBackState ATNState + precedenceRuleDecision bool +} + +func NewStarLoopEntryState() *StarLoopEntryState { + b := NewBaseDecisionState() + + b.stateType = ATNStateStarLoopEntry + + // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. + return &StarLoopEntryState{BaseDecisionState: b} +} + +// LoopEndState marks the end of a * or + loop. +type LoopEndState struct { + *BaseATNState + loopBackState ATNState +} + +func NewLoopEndState() *LoopEndState { + b := NewBaseATNState() + + b.stateType = ATNStateLoopEnd + + return &LoopEndState{BaseATNState: b} +} + +// TokensStartState is the Tokens rule start state linking to each lexer rule start state. +type TokensStartState struct { + *BaseDecisionState +} + +func NewTokensStartState() *TokensStartState { + b := NewBaseDecisionState() + + b.stateType = ATNStateTokenStart + + return &TokensStartState{BaseDecisionState: b} +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go index 3a515a145f..867ed682c0 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/atn_type.go @@ -1,11 +1,11 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// Represent the type of recognizer an ATN applies to. -const ( - ATNTypeLexer = 0 - ATNTypeParser = 1 -) +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// Represent the type of recognizer an ATN applies to. +const ( + ATNTypeLexer = 0 + ATNTypeParser = 1 +) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go index c33f0adb5e..d06bec337d 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/char_stream.go @@ -1,12 +1,12 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type CharStream interface { - IntStream - GetText(int, int) string - GetTextFromTokens(start, end Token) string - GetTextFromInterval(*Interval) string -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
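
As the atn_state.go hunk above makes explicit, state identity in this runtime is just the state number: Hash returns it and Equals compares it. A small sketch of that contract (assuming the usual fmt import; not vendored code):

// stateIdentitySketch shows that two distinct state values with the same
// state number compare equal and hash identically.
func stateIdentitySketch() {
	a := NewBasicState()
	b := NewBasicState()
	a.SetStateNumber(7)
	b.SetStateNumber(7)

	fmt.Println(a.Equals(b))          // true: Equals compares state numbers
	fmt.Println(a.Hash() == b.Hash()) // true: Hash returns the state number
}
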
+ +package antlr + +type CharStream interface { + IntStream + GetText(int, int) string + GetTextFromTokens(start, end Token) string + GetTextFromInterval(*Interval) string +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go index 1bb0314ea0..d96d8349d4 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_factory.go @@ -1,56 +1,56 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// TokenFactory creates CommonToken objects. -type TokenFactory interface { - Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token -} - -// CommonTokenFactory is the default TokenFactory implementation. -type CommonTokenFactory struct { - // copyText indicates whether CommonToken.setText should be called after - // constructing tokens to explicitly set the text. This is useful for cases - // where the input stream might not be able to provide arbitrary substrings of - // text from the input after the lexer creates a token (e.g. the - // implementation of CharStream.GetText in UnbufferedCharStream panics an - // UnsupportedOperationException). Explicitly setting the token text allows - // Token.GetText to be called at any time regardless of the input stream - // implementation. - // - // The default value is false to avoid the performance and memory overhead of - // copying text for every token unless explicitly requested. - copyText bool -} - -func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { - return &CommonTokenFactory{copyText: copyText} -} - -// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not -// explicitly copy token text when constructing tokens. -var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false) - -func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token { - t := NewCommonToken(source, ttype, channel, start, stop) - - t.line = line - t.column = column - - if text != "" { - t.SetText(text) - } else if c.copyText && source.charStream != nil { - t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop))) - } - - return t -} - -func (c *CommonTokenFactory) createThin(ttype int, text string) Token { - t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1) - t.SetText(text) - - return t -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// TokenFactory creates CommonToken objects. +type TokenFactory interface { + Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token +} + +// CommonTokenFactory is the default TokenFactory implementation. +type CommonTokenFactory struct { + // copyText indicates whether CommonToken.setText should be called after + // constructing tokens to explicitly set the text. This is useful for cases + // where the input stream might not be able to provide arbitrary substrings of + // text from the input after the lexer creates a token (e.g. 
the + // implementation of CharStream.GetText in UnbufferedCharStream panics an + // UnsupportedOperationException). Explicitly setting the token text allows + // Token.GetText to be called at any time regardless of the input stream + // implementation. + // + // The default value is false to avoid the performance and memory overhead of + // copying text for every token unless explicitly requested. + copyText bool +} + +func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { + return &CommonTokenFactory{copyText: copyText} +} + +// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not +// explicitly copy token text when constructing tokens. +var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false) + +func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token { + t := NewCommonToken(source, ttype, channel, start, stop) + + t.line = line + t.column = column + + if text != "" { + t.SetText(text) + } else if c.copyText && source.charStream != nil { + t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop))) + } + + return t +} + +func (c *CommonTokenFactory) createThin(ttype int, text string) Token { + t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1) + t.SetText(text) + + return t +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go index c6c9485a20..34d1adda73 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/common_token_stream.go @@ -1,449 +1,449 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" -) - -// CommonTokenStream is an implementation of TokenStream that loads tokens from -// a TokenSource on-demand and places the tokens in a buffer to provide access -// to any previous token by index. This token stream ignores the value of -// Token.getChannel. If your parser requires the token stream filter tokens to -// only those on a particular channel, such as Token.DEFAULT_CHANNEL or -// Token.HIDDEN_CHANNEL, use a filtering token stream such a CommonTokenStream. -type CommonTokenStream struct { - channel int - - // fetchedEOF indicates whether the Token.EOF token has been fetched from - // tokenSource and added to tokens. This field improves performance for the - // following cases: - // - // consume: The lookahead check in consume to preven consuming the EOF symbol is - // optimized by checking the values of fetchedEOF and p instead of calling LA. - // - // fetch: The check to prevent adding multiple EOF symbols into tokens is - // trivial with bt field. - fetchedEOF bool - - // index indexs into tokens of the current token (next token to consume). - // tokens[p] should be LT(1). It is set to -1 when the stream is first - // constructed or when SetTokenSource is called, indicating that the first token - // has not yet been fetched from the token source. For additional information, - // see the documentation of IntStream for a description of initializing methods. - index int - - // tokenSource is the TokenSource from which tokens for the bt stream are - // fetched. - tokenSource TokenSource - - // tokens is all tokens fetched from the token source. 
The list is considered a - // complete view of the input once fetchedEOF is set to true. - tokens []Token -} - -func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { - return &CommonTokenStream{ - channel: channel, - index: -1, - tokenSource: lexer, - tokens: make([]Token, 0), - } -} - -func (c *CommonTokenStream) GetAllTokens() []Token { - return c.tokens -} - -func (c *CommonTokenStream) Mark() int { - return 0 -} - -func (c *CommonTokenStream) Release(marker int) {} - -func (c *CommonTokenStream) reset() { - c.Seek(0) -} - -func (c *CommonTokenStream) Seek(index int) { - c.lazyInit() - c.index = c.adjustSeekIndex(index) -} - -func (c *CommonTokenStream) Get(index int) Token { - c.lazyInit() - - return c.tokens[index] -} - -func (c *CommonTokenStream) Consume() { - SkipEOFCheck := false - - if c.index >= 0 { - if c.fetchedEOF { - // The last token in tokens is EOF. Skip the check if p indexes any fetched. - // token except the last. - SkipEOFCheck = c.index < len(c.tokens)-1 - } else { - // No EOF token in tokens. Skip the check if p indexes a fetched token. - SkipEOFCheck = c.index < len(c.tokens) - } - } else { - // Not yet initialized - SkipEOFCheck = false - } - - if !SkipEOFCheck && c.LA(1) == TokenEOF { - panic("cannot consume EOF") - } - - if c.Sync(c.index + 1) { - c.index = c.adjustSeekIndex(c.index + 1) - } -} - -// Sync makes sure index i in tokens has a token and returns true if a token is -// located at index i and otherwise false. -func (c *CommonTokenStream) Sync(i int) bool { - n := i - len(c.tokens) + 1 // TODO: How many more elements do we need? - - if n > 0 { - fetched := c.fetch(n) - return fetched >= n - } - - return true -} - -// fetch adds n elements to buffer and returns the actual number of elements -// added to the buffer. -func (c *CommonTokenStream) fetch(n int) int { - if c.fetchedEOF { - return 0 - } - - for i := 0; i < n; i++ { - t := c.tokenSource.NextToken() - - t.SetTokenIndex(len(c.tokens)) - c.tokens = append(c.tokens, t) - - if t.GetTokenType() == TokenEOF { - c.fetchedEOF = true - - return i + 1 - } - } - - return n -} - -// GetTokens gets all tokens from start to stop inclusive. -func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token { - if start < 0 || stop < 0 { - return nil - } - - c.lazyInit() - - subset := make([]Token, 0) - - if stop >= len(c.tokens) { - stop = len(c.tokens) - 1 - } - - for i := start; i < stop; i++ { - t := c.tokens[i] - - if t.GetTokenType() == TokenEOF { - break - } - - if types == nil || types.contains(t.GetTokenType()) { - subset = append(subset, t) - } - } - - return subset -} - -func (c *CommonTokenStream) LA(i int) int { - return c.LT(i).GetTokenType() -} - -func (c *CommonTokenStream) lazyInit() { - if c.index == -1 { - c.setup() - } -} - -func (c *CommonTokenStream) setup() { - c.Sync(0) - c.index = c.adjustSeekIndex(0) -} - -func (c *CommonTokenStream) GetTokenSource() TokenSource { - return c.tokenSource -} - -// SetTokenSource resets the c token stream by setting its token source. -func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { - c.tokenSource = tokenSource - c.tokens = make([]Token, 0) - c.index = -1 -} - -// NextTokenOnChannel returns the index of the next token on channel given a -// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are -// no tokens on channel between i and EOF. 
-func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { - c.Sync(i) - - if i >= len(c.tokens) { - return -1 - } - - token := c.tokens[i] - - for token.GetChannel() != c.channel { - if token.GetTokenType() == TokenEOF { - return -1 - } - - i++ - c.Sync(i) - token = c.tokens[i] - } - - return i -} - -// previousTokenOnChannel returns the index of the previous token on channel -// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if -// there are no tokens on channel between i and 0. -func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int { - for i >= 0 && c.tokens[i].GetChannel() != channel { - i-- - } - - return i -} - -// GetHiddenTokensToRight collects all tokens on a specified channel to the -// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL -// or EOF. If channel is -1, it finds any non-default channel token. -func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token { - c.lazyInit() - - if tokenIndex < 0 || tokenIndex >= len(c.tokens) { - panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1)) - } - - nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) - from := tokenIndex + 1 - - // If no onchannel to the right, then nextOnChannel == -1, so set to to last token - var to int - - if nextOnChannel == -1 { - to = len(c.tokens) - 1 - } else { - to = nextOnChannel - } - - return c.filterForChannel(from, to, channel) -} - -// GetHiddenTokensToLeft collects all tokens on channel to the left of the -// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is -// -1, it finds any non default channel token. -func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token { - c.lazyInit() - - if tokenIndex < 0 || tokenIndex >= len(c.tokens) { - panic(strconv.Itoa(tokenIndex) + " not in 0.." 
+ strconv.Itoa(len(c.tokens)-1)) - } - - prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel) - - if prevOnChannel == tokenIndex-1 { - return nil - } - - // If there are none on channel to the left and prevOnChannel == -1 then from = 0 - from := prevOnChannel + 1 - to := tokenIndex - 1 - - return c.filterForChannel(from, to, channel) -} - -func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token { - hidden := make([]Token, 0) - - for i := left; i < right+1; i++ { - t := c.tokens[i] - - if channel == -1 { - if t.GetChannel() != LexerDefaultTokenChannel { - hidden = append(hidden, t) - } - } else if t.GetChannel() == channel { - hidden = append(hidden, t) - } - } - - if len(hidden) == 0 { - return nil - } - - return hidden -} - -func (c *CommonTokenStream) GetSourceName() string { - return c.tokenSource.GetSourceName() -} - -func (c *CommonTokenStream) Size() int { - return len(c.tokens) -} - -func (c *CommonTokenStream) Index() int { - return c.index -} - -func (c *CommonTokenStream) GetAllText() string { - return c.GetTextFromInterval(nil) -} - -func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { - if start == nil || end == nil { - return "" - } - - return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex())) -} - -func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string { - return c.GetTextFromInterval(interval.GetSourceInterval()) -} - -func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { - c.lazyInit() - - if interval == nil { - c.Fill() - interval = NewInterval(0, len(c.tokens)-1) - } else { - c.Sync(interval.Stop) - } - - start := interval.Start - stop := interval.Stop - - if start < 0 || stop < 0 { - return "" - } - - if stop >= len(c.tokens) { - stop = len(c.tokens) - 1 - } - - s := "" - - for i := start; i < stop+1; i++ { - t := c.tokens[i] - - if t.GetTokenType() == TokenEOF { - break - } - - s += t.GetText() - } - - return s -} - -// Fill gets all tokens from the lexer until EOF. -func (c *CommonTokenStream) Fill() { - c.lazyInit() - - for c.fetch(1000) == 1000 { - continue - } -} - -func (c *CommonTokenStream) adjustSeekIndex(i int) int { - return c.NextTokenOnChannel(i, c.channel) -} - -func (c *CommonTokenStream) LB(k int) Token { - if k == 0 || c.index-k < 0 { - return nil - } - - i := c.index - n := 1 - - // Find k good tokens looking backward - for n <= k { - // Skip off-channel tokens - i = c.previousTokenOnChannel(i-1, c.channel) - n++ - } - - if i < 0 { - return nil - } - - return c.tokens[i] -} - -func (c *CommonTokenStream) LT(k int) Token { - c.lazyInit() - - if k == 0 { - return nil - } - - if k < 0 { - return c.LB(-k) - } - - i := c.index - n := 1 // We know tokens[n] is valid - - // Find k good tokens - for n < k { - // Skip off-channel tokens, but make sure to not look past EOF - if c.Sync(i + 1) { - i = c.NextTokenOnChannel(i+1, c.channel) - } - - n++ - } - - return c.tokens[i] -} - -// getNumberOfOnChannelTokens counts EOF once. -func (c *CommonTokenStream) getNumberOfOnChannelTokens() int { - var n int - - c.Fill() - - for i := 0; i < len(c.tokens); i++ { - t := c.tokens[i] - - if t.GetChannel() == c.channel { - n++ - } - - if t.GetTokenType() == TokenEOF { - break - } - } - - return n -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+
+package antlr
+
+import (
+	"strconv"
+)
+
+// CommonTokenStream is an implementation of TokenStream that loads tokens from
+// a TokenSource on-demand and places the tokens in a buffer to provide access
+// to any previous token by index. This token stream ignores the value of
+// Token.getChannel. If your parser requires the token stream to filter tokens
+// down to only those on a particular channel, such as Token.DEFAULT_CHANNEL or
+// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
+type CommonTokenStream struct {
+	channel int
+
+	// fetchedEOF indicates whether the Token.EOF token has been fetched from
+	// tokenSource and added to tokens. This field improves performance for the
+	// following cases:
+	//
+	// consume: The lookahead check in consume to prevent consuming the EOF symbol is
+	// optimized by checking the values of fetchedEOF and p instead of calling LA.
+	//
+	// fetch: The check to prevent adding multiple EOF symbols into tokens is
+	// trivial with this field.
+	fetchedEOF bool
+
+	// index is the index into tokens of the current token (the next token to consume).
+	// tokens[p] should be LT(1). It is set to -1 when the stream is first
+	// constructed or when SetTokenSource is called, indicating that the first token
+	// has not yet been fetched from the token source. For additional information,
+	// see the documentation of IntStream for a description of initializing methods.
+	index int
+
+	// tokenSource is the TokenSource from which tokens for this stream are
+	// fetched.
+	tokenSource TokenSource
+
+	// tokens is all tokens fetched from the token source. The list is considered a
+	// complete view of the input once fetchedEOF is set to true.
+	tokens []Token
+}
+
+func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
+	return &CommonTokenStream{
+		channel:     channel,
+		index:       -1,
+		tokenSource: lexer,
+		tokens:      make([]Token, 0),
+	}
+}
+
+func (c *CommonTokenStream) GetAllTokens() []Token {
+	return c.tokens
+}
+
+func (c *CommonTokenStream) Mark() int {
+	return 0
+}
+
+func (c *CommonTokenStream) Release(marker int) {}
+
+func (c *CommonTokenStream) reset() {
+	c.Seek(0)
+}
+
+func (c *CommonTokenStream) Seek(index int) {
+	c.lazyInit()
+	c.index = c.adjustSeekIndex(index)
+}
+
+func (c *CommonTokenStream) Get(index int) Token {
+	c.lazyInit()
+
+	return c.tokens[index]
+}
+
+func (c *CommonTokenStream) Consume() {
+	SkipEOFCheck := false
+
+	if c.index >= 0 {
+		if c.fetchedEOF {
+			// The last token in tokens is EOF. Skip the check if p indexes any fetched
+			// token except the last.
+			SkipEOFCheck = c.index < len(c.tokens)-1
+		} else {
+			// No EOF token in tokens. Skip the check if p indexes a fetched token.
+			SkipEOFCheck = c.index < len(c.tokens)
+		}
+	} else {
+		// Not yet initialized
+		SkipEOFCheck = false
+	}
+
+	if !SkipEOFCheck && c.LA(1) == TokenEOF {
+		panic("cannot consume EOF")
+	}
+
+	if c.Sync(c.index + 1) {
+		c.index = c.adjustSeekIndex(c.index + 1)
+	}
+}
+
+// Sync makes sure index i in tokens has a token and returns true if a token is
+// located at index i, and false otherwise.
+func (c *CommonTokenStream) Sync(i int) bool {
+	n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
+
+	if n > 0 {
+		fetched := c.fetch(n)
+		return fetched >= n
+	}
+
+	return true
+}
+
+// fetch adds n elements to the buffer and returns the actual number of elements
+// added to the buffer.
+func (c *CommonTokenStream) fetch(n int) int { + if c.fetchedEOF { + return 0 + } + + for i := 0; i < n; i++ { + t := c.tokenSource.NextToken() + + t.SetTokenIndex(len(c.tokens)) + c.tokens = append(c.tokens, t) + + if t.GetTokenType() == TokenEOF { + c.fetchedEOF = true + + return i + 1 + } + } + + return n +} + +// GetTokens gets all tokens from start to stop inclusive. +func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token { + if start < 0 || stop < 0 { + return nil + } + + c.lazyInit() + + subset := make([]Token, 0) + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + for i := start; i < stop; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + if types == nil || types.contains(t.GetTokenType()) { + subset = append(subset, t) + } + } + + return subset +} + +func (c *CommonTokenStream) LA(i int) int { + return c.LT(i).GetTokenType() +} + +func (c *CommonTokenStream) lazyInit() { + if c.index == -1 { + c.setup() + } +} + +func (c *CommonTokenStream) setup() { + c.Sync(0) + c.index = c.adjustSeekIndex(0) +} + +func (c *CommonTokenStream) GetTokenSource() TokenSource { + return c.tokenSource +} + +// SetTokenSource resets the c token stream by setting its token source. +func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { + c.tokenSource = tokenSource + c.tokens = make([]Token, 0) + c.index = -1 +} + +// NextTokenOnChannel returns the index of the next token on channel given a +// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are +// no tokens on channel between i and EOF. +func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { + c.Sync(i) + + if i >= len(c.tokens) { + return -1 + } + + token := c.tokens[i] + + for token.GetChannel() != c.channel { + if token.GetTokenType() == TokenEOF { + return -1 + } + + i++ + c.Sync(i) + token = c.tokens[i] + } + + return i +} + +// previousTokenOnChannel returns the index of the previous token on channel +// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if +// there are no tokens on channel between i and 0. +func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int { + for i >= 0 && c.tokens[i].GetChannel() != channel { + i-- + } + + return i +} + +// GetHiddenTokensToRight collects all tokens on a specified channel to the +// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL +// or EOF. If channel is -1, it finds any non-default channel token. +func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1)) + } + + nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) + from := tokenIndex + 1 + + // If no onchannel to the right, then nextOnChannel == -1, so set to to last token + var to int + + if nextOnChannel == -1 { + to = len(c.tokens) - 1 + } else { + to = nextOnChannel + } + + return c.filterForChannel(from, to, channel) +} + +// GetHiddenTokensToLeft collects all tokens on channel to the left of the +// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is +// -1, it finds any non default channel token. +func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." 
+ strconv.Itoa(len(c.tokens)-1)) + } + + prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel) + + if prevOnChannel == tokenIndex-1 { + return nil + } + + // If there are none on channel to the left and prevOnChannel == -1 then from = 0 + from := prevOnChannel + 1 + to := tokenIndex - 1 + + return c.filterForChannel(from, to, channel) +} + +func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token { + hidden := make([]Token, 0) + + for i := left; i < right+1; i++ { + t := c.tokens[i] + + if channel == -1 { + if t.GetChannel() != LexerDefaultTokenChannel { + hidden = append(hidden, t) + } + } else if t.GetChannel() == channel { + hidden = append(hidden, t) + } + } + + if len(hidden) == 0 { + return nil + } + + return hidden +} + +func (c *CommonTokenStream) GetSourceName() string { + return c.tokenSource.GetSourceName() +} + +func (c *CommonTokenStream) Size() int { + return len(c.tokens) +} + +func (c *CommonTokenStream) Index() int { + return c.index +} + +func (c *CommonTokenStream) GetAllText() string { + return c.GetTextFromInterval(nil) +} + +func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { + if start == nil || end == nil { + return "" + } + + return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex())) +} + +func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string { + return c.GetTextFromInterval(interval.GetSourceInterval()) +} + +func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { + c.lazyInit() + + if interval == nil { + c.Fill() + interval = NewInterval(0, len(c.tokens)-1) + } else { + c.Sync(interval.Stop) + } + + start := interval.Start + stop := interval.Stop + + if start < 0 || stop < 0 { + return "" + } + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + s := "" + + for i := start; i < stop+1; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + s += t.GetText() + } + + return s +} + +// Fill gets all tokens from the lexer until EOF. +func (c *CommonTokenStream) Fill() { + c.lazyInit() + + for c.fetch(1000) == 1000 { + continue + } +} + +func (c *CommonTokenStream) adjustSeekIndex(i int) int { + return c.NextTokenOnChannel(i, c.channel) +} + +func (c *CommonTokenStream) LB(k int) Token { + if k == 0 || c.index-k < 0 { + return nil + } + + i := c.index + n := 1 + + // Find k good tokens looking backward + for n <= k { + // Skip off-channel tokens + i = c.previousTokenOnChannel(i-1, c.channel) + n++ + } + + if i < 0 { + return nil + } + + return c.tokens[i] +} + +func (c *CommonTokenStream) LT(k int) Token { + c.lazyInit() + + if k == 0 { + return nil + } + + if k < 0 { + return c.LB(-k) + } + + i := c.index + n := 1 // We know tokens[n] is valid + + // Find k good tokens + for n < k { + // Skip off-channel tokens, but make sure to not look past EOF + if c.Sync(i + 1) { + i = c.NextTokenOnChannel(i+1, c.channel) + } + + n++ + } + + return c.tokens[i] +} + +// getNumberOfOnChannelTokens counts EOF once. 
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int { + var n int + + c.Fill() + + for i := 0; i < len(c.tokens); i++ { + t := c.tokens[i] + + if t.GetChannel() == c.channel { + n++ + } + + if t.GetTokenType() == TokenEOF { + break + } + } + + return n +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go index 9ea3200536..2ba2c5af45 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/comparators.go @@ -1,147 +1,147 @@ -package antlr - -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -// This file contains all the implementations of custom comparators used for generic collections when the -// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would -// put the comparators in the source file for the struct themselves, but given the organization of this code is -// sorta kinda based upon the Java code, I found it confusing trying to find out which comparator was where and used by -// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig -// collections requires three different comparators depending on what the collection is being used for. Collecting - pun intended - -// all the comparators here, makes it much easier to see which implementation of hash and equals is used by which collection. -// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations. - -// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of -// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some -// type safety and avoid having to implement this for every type that we want to perform comparison on. -// -// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which -// allows us to use it in any collection instance that does nto require a special hash or equals implementation. -type ObjEqComparator[T Collectable[T]] struct{} - -var ( - aStateEqInst = &ObjEqComparator[ATNState]{} - aConfEqInst = &ObjEqComparator[ATNConfig]{} - aConfCompInst = &ATNConfigComparator[ATNConfig]{} - atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{} - dfaStateEqInst = &ObjEqComparator[*DFAState]{} - semctxEqInst = &ObjEqComparator[SemanticContext]{} - atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{} -) - -// Equals2 delegates to the Equals() method of type T -func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool { - return o1.Equals(o2) -} - -// Hash1 delegates to the Hash() method of type T -func (c *ObjEqComparator[T]) Hash1(o T) int { - - return o.Hash() -} - -type SemCComparator[T Collectable[T]] struct{} - -// ATNConfigComparator is used as the compartor for the configLookup field of an ATNConfigSet -// and has a custom Equals() and Hash() implementation, because equality is not based on the -// standard Hash() and Equals() methods of the ATNConfig type. 
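The common_token_stream.go code above is easier to follow once the Sync/fetch contract is clear: an index is only usable after Sync has pulled enough tokens from the source, and fetching stops for good once EOF has been buffered. A stripped-down sketch of just that mechanism, using illustrative types rather than the antlr ones:

```go
package main

import "fmt"

const tokenEOF = -1

// countingSource stands in for a TokenSource: it hands out 1, 2, ..., max
// and then EOF forever.
type countingSource struct{ n, max int }

func (s *countingSource) next() int {
	if s.n >= s.max {
		return tokenEOF
	}
	s.n++
	return s.n
}

// bufferedStream mirrors the on-demand buffering of CommonTokenStream:
// tokens are fetched only when an index is requested, and the buffer is
// final once EOF has been appended.
type bufferedStream struct {
	src        *countingSource
	tokens     []int
	fetchedEOF bool
}

// sync ensures index i is buffered and reports whether a token exists there.
func (b *bufferedStream) sync(i int) bool {
	for !b.fetchedEOF && len(b.tokens) <= i {
		t := b.src.next()
		b.tokens = append(b.tokens, t)
		if t == tokenEOF {
			b.fetchedEOF = true
		}
	}
	return i < len(b.tokens)
}

func main() {
	b := &bufferedStream{src: &countingSource{max: 3}}
	fmt.Println(b.sync(1), b.tokens) // true [1 2]
	fmt.Println(b.sync(9), b.tokens) // false [1 2 3 -1]
}
```

This is why Consume and LT above can lean on Sync returning false instead of bounds-checking everywhere: once fetchedEOF is set, the buffer length never changes again.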
-type ATNConfigComparator[T Collectable[T]] struct { -} - -// Equals2 is a custom comparator for ATNConfigs specifically for configLookup -func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { - - // Same pointer, must be equal, even if both nil - // - if o1 == o2 { - return true - - } - - // If either are nil, but not both, then the result is false - // - if o1 == nil || o2 == nil { - return false - } - - return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && - o1.GetAlt() == o2.GetAlt() && - o1.GetSemanticContext().Equals(o2.GetSemanticContext()) -} - -// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup -func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int { - hash := 7 - hash = 31*hash + o.GetState().GetStateNumber() - hash = 31*hash + o.GetAlt() - hash = 31*hash + o.GetSemanticContext().Hash() - return hash -} - -// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets -type ATNAltConfigComparator[T Collectable[T]] struct { -} - -// Equals2 is a custom comparator for ATNConfigs specifically for configLookup -func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { - - // Same pointer, must be equal, even if both nil - // - if o1 == o2 { - return true - - } - - // If either are nil, but not both, then the result is false - // - if o1 == nil || o2 == nil { - return false - } - - return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && - o1.GetContext().Equals(o2.GetContext()) -} - -// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup -func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int { - h := murmurInit(7) - h = murmurUpdate(h, o.GetState().GetStateNumber()) - h = murmurUpdate(h, o.GetContext().Hash()) - return murmurFinish(h, 2) -} - -// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet -// and has a custom Equals() and Hash() implementation, because equality is not based on the -// standard Hash() and Equals() methods of the ATNConfig type. -type BaseATNConfigComparator[T Collectable[T]] struct { -} - -// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet -func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { - - // Same pointer, must be equal, even if both nil - // - if o1 == o2 { - return true - - } - - // If either are nil, but not both, then the result is false - // - if o1 == nil || o2 == nil { - return false - } - - return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && - o1.GetAlt() == o2.GetAlt() && - o1.GetSemanticContext().Equals(o2.GetSemanticContext()) -} - -// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just -// delegates to the standard Hash() method of the ATNConfig type. -func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int { - - return o.Hash() -} +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +// This file contains all the implementations of custom comparators used for generic collections when the +// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. 
Normally, we would
+// put the comparators in the source file for the struct themselves, but given that the organization of this code is
+// loosely based upon the Java code, I found it confusing trying to work out which comparator was where and used by
+// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig
+// collections, requires three different comparators depending on what the collection is being used for. Collecting - pun intended -
+// all the comparators here makes it much easier to see which implementation of hash and equals is used by which collection.
+// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations.
+
+// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default
+// equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some
+// type safety and avoid having to implement this for every type that we want to perform comparison on.
+//
+// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared, which
+// allows us to use it in any collection instance that does not require a special hash or equals implementation.
+type ObjEqComparator[T Collectable[T]] struct{}
+
+var (
+	aStateEqInst    = &ObjEqComparator[ATNState]{}
+	aConfEqInst     = &ObjEqComparator[ATNConfig]{}
+	aConfCompInst   = &ATNConfigComparator[ATNConfig]{}
+	atnConfCompInst = &BaseATNConfigComparator[ATNConfig]{}
+	dfaStateEqInst  = &ObjEqComparator[*DFAState]{}
+	semctxEqInst    = &ObjEqComparator[SemanticContext]{}
+	atnAltCfgEqInst = &ATNAltConfigComparator[ATNConfig]{}
+)
+
+// Equals2 delegates to the Equals() method of type T.
+func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool {
+	return o1.Equals(o2)
+}
+
+// Hash1 delegates to the Hash() method of type T.
+func (c *ObjEqComparator[T]) Hash1(o T) int {
+
+	return o.Hash()
+}
+
+type SemCComparator[T Collectable[T]] struct{}
+
+// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet
+// and has a custom Equals() and Hash() implementation, because equality is not based on the
+// standard Hash() and Equals() methods of the ATNConfig type.
+type ATNConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for configLookup +func (c *ATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetAlt() == o2.GetAlt() && + o1.GetSemanticContext().Equals(o2.GetSemanticContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup +func (c *ATNConfigComparator[T]) Hash1(o ATNConfig) int { + hash := 7 + hash = 31*hash + o.GetState().GetStateNumber() + hash = 31*hash + o.GetAlt() + hash = 31*hash + o.GetSemanticContext().Hash() + return hash +} + +// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets +type ATNAltConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for configLookup +func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetContext().Equals(o2.GetContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup +func (c *ATNAltConfigComparator[T]) Hash1(o ATNConfig) int { + h := murmurInit(7) + h = murmurUpdate(h, o.GetState().GetStateNumber()) + h = murmurUpdate(h, o.GetContext().Hash()) + return murmurFinish(h, 2) +} + +// BaseATNConfigComparator is used as the comparator for the configLookup field of a BaseATNConfigSet +// and has a custom Equals() and Hash() implementation, because equality is not based on the +// standard Hash() and Equals() methods of the ATNConfig type. +type BaseATNConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet +func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetAlt() == o2.GetAlt() && + o1.GetSemanticContext().Equals(o2.GetSemanticContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just +// delegates to the standard Hash() method of the ATNConfig type. +func (c *BaseATNConfigComparator[T]) Hash1(o ATNConfig) int { + + return o.Hash() +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go index bfd43e1f73..6c7975336b 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa.go @@ -1,148 +1,148 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
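The point of the comparators in comparators.go is that one element type can hash and compare differently depending on which collection it sits in: an ATNConfig is keyed by state, alt, and semantic context in one lookup, but by state and context in another. A self-contained sketch of the pattern under illustrative names (the collectable and comparator shapes below are stand-ins, not the package's real interfaces):

```go
package main

import "fmt"

// collectable mirrors the shape of the Collectable[T] constraint: elements
// carry default Hash/Equals behaviour of their own.
type collectable[T any] interface {
	Hash() int
	Equals(other T) bool
}

// comparator mirrors the Hash1/Equals2 pair: a collection consults the
// comparator instead of the element's own methods, so the same element type
// can be keyed differently in different collections.
type comparator[T any] interface {
	Hash1(o T) int
	Equals2(o1, o2 T) bool
}

// objEq is the default comparator: it delegates straight to the element's
// own methods, the way ObjEqComparator does.
type objEq[T collectable[T]] struct{}

func (objEq[T]) Hash1(o T) int       { return o.Hash() }
func (objEq[T]) Equals2(a, b T) bool { return a.Equals(b) }

type point struct{ x, y int }

func (p point) Hash() int           { return 31*p.x + p.y }
func (p point) Equals(q point) bool { return p == q }

func main() {
	var c comparator[point] = objEq[point]{}
	fmt.Println(c.Hash1(point{1, 2}), c.Equals2(point{1, 2}, point{1, 2}))
}
```

A specialised comparator for the same element type would simply hash and compare a different subset of fields, which is exactly what ATNConfigComparator and ATNAltConfigComparator do below.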
- -package antlr - -type DFA struct { - // atnStartState is the ATN state in which this was created - atnStartState DecisionState - - decision int - - // states is all the DFA states. Use Map to get the old state back; Set can only - // indicate whether it is there. Go maps implement key hash collisions and so on and are very - // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva - // amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them - // to see if they really are the same object. - // - // - states *JStore[*DFAState, *ObjEqComparator[*DFAState]] - - numstates int - - s0 *DFAState - - // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa. - // True if the DFA is for a precedence decision and false otherwise. - precedenceDfa bool -} - -func NewDFA(atnStartState DecisionState, decision int) *DFA { - dfa := &DFA{ - atnStartState: atnStartState, - decision: decision, - states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst), - } - if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision { - dfa.precedenceDfa = true - dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false)) - dfa.s0.isAcceptState = false - dfa.s0.requiresFullContext = false - } - return dfa -} - -// getPrecedenceStartState gets the start state for the current precedence and -// returns the start state corresponding to the specified precedence if a start -// state exists for the specified precedence and nil otherwise. d must be a -// precedence DFA. See also isPrecedenceDfa. -func (d *DFA) getPrecedenceStartState(precedence int) *DFAState { - if !d.getPrecedenceDfa() { - panic("only precedence DFAs may contain a precedence start state") - } - - // s0.edges is never nil for a precedence DFA - if precedence < 0 || precedence >= len(d.getS0().getEdges()) { - return nil - } - - return d.getS0().getIthEdge(precedence) -} - -// setPrecedenceStartState sets the start state for the current precedence. d -// must be a precedence DFA. See also isPrecedenceDfa. -func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { - if !d.getPrecedenceDfa() { - panic("only precedence DFAs may contain a precedence start state") - } - - if precedence < 0 { - return - } - - // Synchronization on s0 here is ok. When the DFA is turned into a - // precedence DFA, s0 will be initialized once and not updated again. s0.edges - // is never nil for a precedence DFA. - s0 := d.getS0() - if precedence >= s0.numEdges() { - edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...) - s0.setEdges(edges) - d.setS0(s0) - } - - s0.setIthEdge(precedence, startState) -} - -func (d *DFA) getPrecedenceDfa() bool { - return d.precedenceDfa -} - -// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs -// from the current DFA configuration, then d.states is cleared, the initial -// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to -// store the start states for individual precedence values if precedenceDfa is -// true or nil otherwise, and d.precedenceDfa is updated. 
-func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { - if d.getPrecedenceDfa() != precedenceDfa { - d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst) - d.numstates = 0 - - if precedenceDfa { - precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) - - precedenceState.setEdges(make([]*DFAState, 0)) - precedenceState.isAcceptState = false - precedenceState.requiresFullContext = false - d.setS0(precedenceState) - } else { - d.setS0(nil) - } - - d.precedenceDfa = precedenceDfa - } -} - -func (d *DFA) getS0() *DFAState { - return d.s0 -} - -func (d *DFA) setS0(s *DFAState) { - d.s0 = s -} - -// sortedStates returns the states in d sorted by their state number. -func (d *DFA) sortedStates() []*DFAState { - - vs := d.states.SortedSlice(func(i, j *DFAState) bool { - return i.stateNumber < j.stateNumber - }) - - return vs -} - -func (d *DFA) String(literalNames []string, symbolicNames []string) string { - if d.getS0() == nil { - return "" - } - - return NewDFASerializer(d, literalNames, symbolicNames).String() -} - -func (d *DFA) ToLexerString() string { - if d.getS0() == nil { - return "" - } - - return NewLexerDFASerializer(d).String() -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type DFA struct { + // atnStartState is the ATN state in which this was created + atnStartState DecisionState + + decision int + + // states is all the DFA states. Use Map to get the old state back; Set can only + // indicate whether it is there. Go maps implement key hash collisions and so on and are very + // good, but the DFAState is an object and can't be used directly as the key as it can in say JAva + // amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them + // to see if they really are the same object. + // + // + states *JStore[*DFAState, *ObjEqComparator[*DFAState]] + + numstates int + + s0 *DFAState + + // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa. + // True if the DFA is for a precedence decision and false otherwise. + precedenceDfa bool +} + +func NewDFA(atnStartState DecisionState, decision int) *DFA { + dfa := &DFA{ + atnStartState: atnStartState, + decision: decision, + states: NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst), + } + if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision { + dfa.precedenceDfa = true + dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false)) + dfa.s0.isAcceptState = false + dfa.s0.requiresFullContext = false + } + return dfa +} + +// getPrecedenceStartState gets the start state for the current precedence and +// returns the start state corresponding to the specified precedence if a start +// state exists for the specified precedence and nil otherwise. d must be a +// precedence DFA. See also isPrecedenceDfa. +func (d *DFA) getPrecedenceStartState(precedence int) *DFAState { + if !d.getPrecedenceDfa() { + panic("only precedence DFAs may contain a precedence start state") + } + + // s0.edges is never nil for a precedence DFA + if precedence < 0 || precedence >= len(d.getS0().getEdges()) { + return nil + } + + return d.getS0().getIthEdge(precedence) +} + +// setPrecedenceStartState sets the start state for the current precedence. d +// must be a precedence DFA. See also isPrecedenceDfa. 
+func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { + if !d.getPrecedenceDfa() { + panic("only precedence DFAs may contain a precedence start state") + } + + if precedence < 0 { + return + } + + // Synchronization on s0 here is ok. When the DFA is turned into a + // precedence DFA, s0 will be initialized once and not updated again. s0.edges + // is never nil for a precedence DFA. + s0 := d.getS0() + if precedence >= s0.numEdges() { + edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...) + s0.setEdges(edges) + d.setS0(s0) + } + + s0.setIthEdge(precedence, startState) +} + +func (d *DFA) getPrecedenceDfa() bool { + return d.precedenceDfa +} + +// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs +// from the current DFA configuration, then d.states is cleared, the initial +// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to +// store the start states for individual precedence values if precedenceDfa is +// true or nil otherwise, and d.precedenceDfa is updated. +func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { + if d.getPrecedenceDfa() != precedenceDfa { + d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst) + d.numstates = 0 + + if precedenceDfa { + precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) + + precedenceState.setEdges(make([]*DFAState, 0)) + precedenceState.isAcceptState = false + precedenceState.requiresFullContext = false + d.setS0(precedenceState) + } else { + d.setS0(nil) + } + + d.precedenceDfa = precedenceDfa + } +} + +func (d *DFA) getS0() *DFAState { + return d.s0 +} + +func (d *DFA) setS0(s *DFAState) { + d.s0 = s +} + +// sortedStates returns the states in d sorted by their state number. +func (d *DFA) sortedStates() []*DFAState { + + vs := d.states.SortedSlice(func(i, j *DFAState) bool { + return i.stateNumber < j.stateNumber + }) + + return vs +} + +func (d *DFA) String(literalNames []string, symbolicNames []string) string { + if d.getS0() == nil { + return "" + } + + return NewDFASerializer(d, literalNames, symbolicNames).String() +} + +func (d *DFA) ToLexerString() string { + if d.getS0() == nil { + return "" + } + + return NewLexerDFASerializer(d).String() +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go index 84d0a31e53..2aab33cf3d 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_serializer.go @@ -1,158 +1,158 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -// DFASerializer is a DFA walker that knows how to dump them to serialized -// strings. 
-type DFASerializer struct { - dfa *DFA - literalNames []string - symbolicNames []string -} - -func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer { - if literalNames == nil { - literalNames = make([]string, 0) - } - - if symbolicNames == nil { - symbolicNames = make([]string, 0) - } - - return &DFASerializer{ - dfa: dfa, - literalNames: literalNames, - symbolicNames: symbolicNames, - } -} - -func (d *DFASerializer) String() string { - if d.dfa.getS0() == nil { - return "" - } - - buf := "" - states := d.dfa.sortedStates() - - for _, s := range states { - if s.edges != nil { - n := len(s.edges) - - for j := 0; j < n; j++ { - t := s.edges[j] - - if t != nil && t.stateNumber != 0x7FFFFFFF { - buf += d.GetStateString(s) - buf += "-" - buf += d.getEdgeLabel(j) - buf += "->" - buf += d.GetStateString(t) - buf += "\n" - } - } - } - } - - if len(buf) == 0 { - return "" - } - - return buf -} - -func (d *DFASerializer) getEdgeLabel(i int) string { - if i == 0 { - return "EOF" - } else if d.literalNames != nil && i-1 < len(d.literalNames) { - return d.literalNames[i-1] - } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) { - return d.symbolicNames[i-1] - } - - return strconv.Itoa(i - 1) -} - -func (d *DFASerializer) GetStateString(s *DFAState) string { - var a, b string - - if s.isAcceptState { - a = ":" - } - - if s.requiresFullContext { - b = "^" - } - - baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b - - if s.isAcceptState { - if s.predicates != nil { - return baseStateStr + "=>" + fmt.Sprint(s.predicates) - } - - return baseStateStr + "=>" + fmt.Sprint(s.prediction) - } - - return baseStateStr -} - -type LexerDFASerializer struct { - *DFASerializer -} - -func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer { - return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)} -} - -func (l *LexerDFASerializer) getEdgeLabel(i int) string { - var sb strings.Builder - sb.Grow(6) - sb.WriteByte('\'') - sb.WriteRune(rune(i)) - sb.WriteByte('\'') - return sb.String() -} - -func (l *LexerDFASerializer) String() string { - if l.dfa.getS0() == nil { - return "" - } - - buf := "" - states := l.dfa.sortedStates() - - for i := 0; i < len(states); i++ { - s := states[i] - - if s.edges != nil { - n := len(s.edges) - - for j := 0; j < n; j++ { - t := s.edges[j] - - if t != nil && t.stateNumber != 0x7FFFFFFF { - buf += l.GetStateString(s) - buf += "-" - buf += l.getEdgeLabel(j) - buf += "->" - buf += l.GetStateString(t) - buf += "\n" - } - } - } - } - - if len(buf) == 0 { - return "" - } - - return buf -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +// DFASerializer is a DFA walker that knows how to dump them to serialized +// strings. 
+type DFASerializer struct { + dfa *DFA + literalNames []string + symbolicNames []string +} + +func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer { + if literalNames == nil { + literalNames = make([]string, 0) + } + + if symbolicNames == nil { + symbolicNames = make([]string, 0) + } + + return &DFASerializer{ + dfa: dfa, + literalNames: literalNames, + symbolicNames: symbolicNames, + } +} + +func (d *DFASerializer) String() string { + if d.dfa.getS0() == nil { + return "" + } + + buf := "" + states := d.dfa.sortedStates() + + for _, s := range states { + if s.edges != nil { + n := len(s.edges) + + for j := 0; j < n; j++ { + t := s.edges[j] + + if t != nil && t.stateNumber != 0x7FFFFFFF { + buf += d.GetStateString(s) + buf += "-" + buf += d.getEdgeLabel(j) + buf += "->" + buf += d.GetStateString(t) + buf += "\n" + } + } + } + } + + if len(buf) == 0 { + return "" + } + + return buf +} + +func (d *DFASerializer) getEdgeLabel(i int) string { + if i == 0 { + return "EOF" + } else if d.literalNames != nil && i-1 < len(d.literalNames) { + return d.literalNames[i-1] + } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) { + return d.symbolicNames[i-1] + } + + return strconv.Itoa(i - 1) +} + +func (d *DFASerializer) GetStateString(s *DFAState) string { + var a, b string + + if s.isAcceptState { + a = ":" + } + + if s.requiresFullContext { + b = "^" + } + + baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b + + if s.isAcceptState { + if s.predicates != nil { + return baseStateStr + "=>" + fmt.Sprint(s.predicates) + } + + return baseStateStr + "=>" + fmt.Sprint(s.prediction) + } + + return baseStateStr +} + +type LexerDFASerializer struct { + *DFASerializer +} + +func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer { + return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)} +} + +func (l *LexerDFASerializer) getEdgeLabel(i int) string { + var sb strings.Builder + sb.Grow(6) + sb.WriteByte('\'') + sb.WriteRune(rune(i)) + sb.WriteByte('\'') + return sb.String() +} + +func (l *LexerDFASerializer) String() string { + if l.dfa.getS0() == nil { + return "" + } + + buf := "" + states := l.dfa.sortedStates() + + for i := 0; i < len(states); i++ { + s := states[i] + + if s.edges != nil { + n := len(s.edges) + + for j := 0; j < n; j++ { + t := s.edges[j] + + if t != nil && t.stateNumber != 0x7FFFFFFF { + buf += l.GetStateString(s) + buf += "-" + buf += l.getEdgeLabel(j) + buf += "->" + buf += l.GetStateString(t) + buf += "\n" + } + } + } + } + + if len(buf) == 0 { + return "" + } + + return buf +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go index c90dec55c8..7d1dd0e7cd 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/dfa_state.go @@ -1,169 +1,169 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" -) - -// PredPrediction maps a predicate to a predicted alternative. 
-type PredPrediction struct { - alt int - pred SemanticContext -} - -func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction { - return &PredPrediction{alt: alt, pred: pred} -} - -func (p *PredPrediction) String() string { - return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" -} - -// DFAState represents a set of possible ATN configurations. As Aho, Sethi, -// Ullman p. 117 says: "The DFA uses its state to keep track of all possible -// states the ATN can be in after reading each input symbol. That is to say, -// after reading input a1a2..an, the DFA is in a state that represents the -// subset T of the states of the ATN that are reachable from the ATN's start -// state along some path labeled a1a2..an." In conventional NFA-to-DFA -// conversion, therefore, the subset T would be a bitset representing the set of -// states the ATN could be in. We need to track the alt predicted by each state -// as well, however. More importantly, we need to maintain a stack of states, -// tracking the closure operations as they jump from rule to rule, emulating -// rule invocations (method calls). I have to add a stack to simulate the proper -// lookahead sequences for the underlying LL grammar from which the ATN was -// derived. -// -// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a -// state (ala normal conversion) and a RuleContext describing the chain of rules -// (if any) followed to arrive at that state. -// -// A DFAState may have multiple references to a particular state, but with -// different ATN contexts (with same or different alts) meaning that state was -// reached via a different set of rule invocations. -type DFAState struct { - stateNumber int - configs ATNConfigSet - - // edges elements point to the target of the symbol. Shift up by 1 so (-1) - // Token.EOF maps to the first element. - edges []*DFAState - - isAcceptState bool - - // prediction is the ttype we match or alt we predict if the state is accept. - // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or - // requiresFullContext. - prediction int - - lexerActionExecutor *LexerActionExecutor - - // requiresFullContext indicates it was created during an SLL prediction that - // discovered a conflict between the configurations in the state. Future - // ParserATNSimulator.execATN invocations immediately jump doing - // full context prediction if true. - requiresFullContext bool - - // predicates is the predicates associated with the ATN configurations of the - // DFA state during SLL parsing. When we have predicates, requiresFullContext - // is false, since full context prediction evaluates predicates on-the-fly. If - // d is - // not nil, then prediction is ATN.INVALID_ALT_NUMBER. - // - // We only use these for non-requiresFullContext but conflicting states. That - // means we know from the context (it's $ or we don't dip into outer context) - // that it's an ambiguity not a conflict. - // - // This list is computed by - // ParserATNSimulator.predicateDFAState. - predicates []*PredPrediction -} - -func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { - if configs == nil { - configs = NewBaseATNConfigSet(false) - } - - return &DFAState{configs: configs, stateNumber: stateNumber} -} - -// GetAltSet gets the set of all alts mentioned by all ATN configurations in d. 
-func (d *DFAState) GetAltSet() []int { - var alts []int - - if d.configs != nil { - for _, c := range d.configs.GetItems() { - alts = append(alts, c.GetAlt()) - } - } - - if len(alts) == 0 { - return nil - } - - return alts -} - -func (d *DFAState) getEdges() []*DFAState { - return d.edges -} - -func (d *DFAState) numEdges() int { - return len(d.edges) -} - -func (d *DFAState) getIthEdge(i int) *DFAState { - return d.edges[i] -} - -func (d *DFAState) setEdges(newEdges []*DFAState) { - d.edges = newEdges -} - -func (d *DFAState) setIthEdge(i int, edge *DFAState) { - d.edges[i] = edge -} - -func (d *DFAState) setPrediction(v int) { - d.prediction = v -} - -func (d *DFAState) String() string { - var s string - if d.isAcceptState { - if d.predicates != nil { - s = "=>" + fmt.Sprint(d.predicates) - } else { - s = "=>" + fmt.Sprint(d.prediction) - } - } - - return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s) -} - -func (d *DFAState) Hash() int { - h := murmurInit(7) - h = murmurUpdate(h, d.configs.Hash()) - return murmurFinish(h, 1) -} - -// Equals returns whether d equals other. Two DFAStates are equal if their ATN -// configuration sets are the same. This method is used to see if a state -// already exists. -// -// Because the number of alternatives and number of ATN configurations are -// finite, there is a finite number of DFA states that can be processed. This is -// necessary to show that the algorithm terminates. -// -// Cannot test the DFA state numbers here because in -// ParserATNSimulator.addDFAState we need to know if any other state exists that -// has d exact set of ATN configurations. The stateNumber is irrelevant. -func (d *DFAState) Equals(o Collectable[*DFAState]) bool { - if d == o { - return true - } - - return d.configs.Equals(o.(*DFAState).configs) -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// PredPrediction maps a predicate to a predicted alternative. +type PredPrediction struct { + alt int + pred SemanticContext +} + +func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction { + return &PredPrediction{alt: alt, pred: pred} +} + +func (p *PredPrediction) String() string { + return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" +} + +// DFAState represents a set of possible ATN configurations. As Aho, Sethi, +// Ullman p. 117 says: "The DFA uses its state to keep track of all possible +// states the ATN can be in after reading each input symbol. That is to say, +// after reading input a1a2..an, the DFA is in a state that represents the +// subset T of the states of the ATN that are reachable from the ATN's start +// state along some path labeled a1a2..an." In conventional NFA-to-DFA +// conversion, therefore, the subset T would be a bitset representing the set of +// states the ATN could be in. We need to track the alt predicted by each state +// as well, however. More importantly, we need to maintain a stack of states, +// tracking the closure operations as they jump from rule to rule, emulating +// rule invocations (method calls). I have to add a stack to simulate the proper +// lookahead sequences for the underlying LL grammar from which the ATN was +// derived. +// +// I use a set of ATNConfig objects, not simple states. 
An ATNConfig is both a +// state (ala normal conversion) and a RuleContext describing the chain of rules +// (if any) followed to arrive at that state. +// +// A DFAState may have multiple references to a particular state, but with +// different ATN contexts (with same or different alts) meaning that state was +// reached via a different set of rule invocations. +type DFAState struct { + stateNumber int + configs ATNConfigSet + + // edges elements point to the target of the symbol. Shift up by 1 so (-1) + // Token.EOF maps to the first element. + edges []*DFAState + + isAcceptState bool + + // prediction is the ttype we match or alt we predict if the state is accept. + // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or + // requiresFullContext. + prediction int + + lexerActionExecutor *LexerActionExecutor + + // requiresFullContext indicates it was created during an SLL prediction that + // discovered a conflict between the configurations in the state. Future + // ParserATNSimulator.execATN invocations immediately jump doing + // full context prediction if true. + requiresFullContext bool + + // predicates is the predicates associated with the ATN configurations of the + // DFA state during SLL parsing. When we have predicates, requiresFullContext + // is false, since full context prediction evaluates predicates on-the-fly. If + // d is + // not nil, then prediction is ATN.INVALID_ALT_NUMBER. + // + // We only use these for non-requiresFullContext but conflicting states. That + // means we know from the context (it's $ or we don't dip into outer context) + // that it's an ambiguity not a conflict. + // + // This list is computed by + // ParserATNSimulator.predicateDFAState. + predicates []*PredPrediction +} + +func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { + if configs == nil { + configs = NewBaseATNConfigSet(false) + } + + return &DFAState{configs: configs, stateNumber: stateNumber} +} + +// GetAltSet gets the set of all alts mentioned by all ATN configurations in d. +func (d *DFAState) GetAltSet() []int { + var alts []int + + if d.configs != nil { + for _, c := range d.configs.GetItems() { + alts = append(alts, c.GetAlt()) + } + } + + if len(alts) == 0 { + return nil + } + + return alts +} + +func (d *DFAState) getEdges() []*DFAState { + return d.edges +} + +func (d *DFAState) numEdges() int { + return len(d.edges) +} + +func (d *DFAState) getIthEdge(i int) *DFAState { + return d.edges[i] +} + +func (d *DFAState) setEdges(newEdges []*DFAState) { + d.edges = newEdges +} + +func (d *DFAState) setIthEdge(i int, edge *DFAState) { + d.edges[i] = edge +} + +func (d *DFAState) setPrediction(v int) { + d.prediction = v +} + +func (d *DFAState) String() string { + var s string + if d.isAcceptState { + if d.predicates != nil { + s = "=>" + fmt.Sprint(d.predicates) + } else { + s = "=>" + fmt.Sprint(d.prediction) + } + } + + return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s) +} + +func (d *DFAState) Hash() int { + h := murmurInit(7) + h = murmurUpdate(h, d.configs.Hash()) + return murmurFinish(h, 1) +} + +// Equals returns whether d equals other. Two DFAStates are equal if their ATN +// configuration sets are the same. This method is used to see if a state +// already exists. +// +// Because the number of alternatives and number of ATN configurations are +// finite, there is a finite number of DFA states that can be processed. This is +// necessary to show that the algorithm terminates. 
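Because `Equals` and `Hash` key a DFAState purely on its configuration set (the state number is deliberately ignored), a simulator can deduplicate states with a hash-indexed table and fall back to equality checks on collisions. A rough, self-contained sketch of that pattern, using stand-in types rather than the runtime's:

```go
package main

import "fmt"

// state stands in for DFAState: its identity is the config set, not its number.
type state struct {
	number  int
	configs string // stand-in for the ATN configuration set
}

func (s *state) hash() int {
	h := 7
	for _, c := range s.configs {
		h = h*31 + int(c)
	}
	return h
}

func (s *state) equals(o *state) bool { return s.configs == o.configs }

// addState returns the previously stored equal state if one exists, else stores s.
func addState(table map[int][]*state, s *state) *state {
	h := s.hash()
	for _, e := range table[h] {
		if e.equals(s) { // stateNumber is irrelevant here, as in DFAState.Equals
			return e
		}
	}
	table[h] = append(table[h], s)
	return s
}

func main() {
	table := map[int][]*state{}
	a := addState(table, &state{number: 1, configs: "{(s3,1),(s4,2)}"})
	b := addState(table, &state{number: 2, configs: "{(s3,1),(s4,2)}"})
	fmt.Println(a == b) // true: the second insert dedupes to the first
}
```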
+// +// Cannot test the DFA state numbers here because in +// ParserATNSimulator.addDFAState we need to know if any other state exists that +// has d exact set of ATN configurations. The stateNumber is irrelevant. +func (d *DFAState) Equals(o Collectable[*DFAState]) bool { + if d == o { + return true + } + + return d.configs.Equals(o.(*DFAState).configs) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go index c55bcc19b2..44fe4e01c8 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/diagnostic_error_listener.go @@ -1,109 +1,109 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" -) - -// -// This implementation of {@link ANTLRErrorListener} can be used to identify -// certain potential correctness and performance problems in grammars. "reports" -// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate -// message. -// -// - -type DiagnosticErrorListener struct { - *DefaultErrorListener - - exactOnly bool -} - -func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { - - n := new(DiagnosticErrorListener) - - // whether all ambiguities or only exact ambiguities are Reported. - n.exactOnly = exactOnly - return n -} - -func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if d.exactOnly && !exact { - return - } - msg := "reportAmbiguity d=" + - d.getDecisionDescription(recognizer, dfa) + - ": ambigAlts=" + - d.getConflictingAlts(ambigAlts, configs).String() + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - - msg := "reportAttemptingFullContext d=" + - d.getDecisionDescription(recognizer, dfa) + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - msg := "reportContextSensitivity d=" + - d.getDecisionDescription(recognizer, dfa) + - ", input='" + - recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" - recognizer.NotifyErrorListeners(msg, nil, nil) -} - -func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string { - decision := dfa.decision - ruleIndex := dfa.atnStartState.GetRuleIndex() - - ruleNames := recognizer.GetRuleNames() - if ruleIndex < 0 || ruleIndex >= len(ruleNames) { - return strconv.Itoa(decision) - } - ruleName := ruleNames[ruleIndex] - if ruleName == "" { - return strconv.Itoa(decision) - } - return strconv.Itoa(decision) + " (" + ruleName + ")" -} - -// Computes the set of conflicting or ambiguous alternatives from a -// configuration set, if that information was not already provided by the -// parser. 
-// -// @param ReportedAlts The set of conflicting or ambiguous alternatives, as -// Reported by the parser. -// @param configs The conflicting or ambiguous configuration set. -// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise -// returns the set of alternatives represented in {@code configs}. -func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { - if ReportedAlts != nil { - return ReportedAlts - } - result := NewBitSet() - for _, c := range set.GetItems() { - result.add(c.GetAlt()) - } - - return result -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" +) + +// +// This implementation of {@link ANTLRErrorListener} can be used to identify +// certain potential correctness and performance problems in grammars. "reports" +// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate +// message. +// +// + +type DiagnosticErrorListener struct { + *DefaultErrorListener + + exactOnly bool +} + +func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { + + n := new(DiagnosticErrorListener) + + // whether all ambiguities or only exact ambiguities are Reported. + n.exactOnly = exactOnly + return n +} + +func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + if d.exactOnly && !exact { + return + } + msg := "reportAmbiguity d=" + + d.getDecisionDescription(recognizer, dfa) + + ": ambigAlts=" + + d.getConflictingAlts(ambigAlts, configs).String() + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { + + msg := "reportAttemptingFullContext d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { + msg := "reportContextSensitivity d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string { + decision := dfa.decision + ruleIndex := dfa.atnStartState.GetRuleIndex() + + ruleNames := recognizer.GetRuleNames() + if ruleIndex < 0 || ruleIndex >= len(ruleNames) { + return strconv.Itoa(decision) + } + ruleName := ruleNames[ruleIndex] + if ruleName == "" { + return strconv.Itoa(decision) + } + return strconv.Itoa(decision) + " (" + ruleName + ")" +} + +// Computes the set of conflicting or ambiguous alternatives from a +// configuration set, if that information was not already provided by the +// parser. +// +// @param ReportedAlts The set of conflicting or ambiguous alternatives, as +// Reported by the parser. +// @param configs The conflicting or ambiguous configuration set. 
+// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise +// returns the set of alternatives represented in {@code configs}. +func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { + if ReportedAlts != nil { + return ReportedAlts + } + result := NewBitSet() + for _, c := range set.GetItems() { + result.add(c.GetAlt()) + } + + return result +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go index f679f0dcd5..2437e64c3d 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_listener.go @@ -1,104 +1,104 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "os" - "strconv" -) - -// Provides an empty default implementation of {@link ANTLRErrorListener}. The -// default implementation of each method does nothing, but can be overridden as -// necessary. - -type ErrorListener interface { - SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) - ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) - ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) - ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) -} - -type DefaultErrorListener struct { -} - -func NewDefaultErrorListener() *DefaultErrorListener { - return new(DefaultErrorListener) -} - -func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { -} - -func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { -} - -func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { -} - -type ConsoleErrorListener struct { - *DefaultErrorListener -} - -func NewConsoleErrorListener() *ConsoleErrorListener { - return new(ConsoleErrorListener) -} - -// Provides a default instance of {@link ConsoleErrorListener}. -var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() - -// {@inheritDoc} -// -//
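Stepping back from the diff for a moment: a `DiagnosticErrorListener` like the one above is typically attached to a generated parser to surface ambiguity reports while debugging a grammar. A hedged sketch of that wiring; `MyLexer`, `MyParser`, and `Expr` are placeholders for generated code, and the listener-registration calls are assumed from the runtime's recognizer API:

```go
package main

import (
	"github.com/antlr/antlr4/runtime/Go/antlr/v4"

	"example.com/myproj/parser" // hypothetical generated package
)

func main() {
	is := antlr.NewInputStream("1 + 2 * 3")
	lexer := parser.NewMyLexer(is) // generated lexer (placeholder)
	stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	p := parser.NewMyParser(stream) // generated parser (placeholder)

	p.RemoveErrorListeners()
	// exactOnly=true reports only exact ambiguities, per the field comment above.
	p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))
	p.Expr() // hypothetical start rule
}
```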

-// This implementation prints messages to {@link System//err} containing the -// values of {@code line}, {@code charPositionInLine}, and {@code msg} using -// the following format.

-// -//
-// line line:charPositionInLine msg
-// 
-func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) -} - -type ProxyErrorListener struct { - *DefaultErrorListener - delegates []ErrorListener -} - -func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { - if delegates == nil { - panic("delegates is not provided") - } - l := new(ProxyErrorListener) - l.delegates = delegates - return l -} - -func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { - for _, d := range p.delegates { - d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) - } -} - -func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { - for _, d := range p.delegates { - d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) - } -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "os" + "strconv" +) + +// Provides an empty default implementation of {@link ANTLRErrorListener}. The +// default implementation of each method does nothing, but can be overridden as +// necessary. 
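The interface that follows is small enough that a custom listener is usually just an embedded `DefaultErrorListener` plus one override. A sketch of an error-collecting listener; the type and package are illustrative, not part of the runtime, but the method signature is taken directly from the interface below:

```go
package mylistener // hypothetical package

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// CollectingListener embeds DefaultErrorListener so the three Report* methods
// keep their no-op defaults; only SyntaxError is overridden.
type CollectingListener struct {
	*antlr.DefaultErrorListener
	Errors []string
}

func NewCollectingListener() *CollectingListener {
	return &CollectingListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}
}

func (c *CollectingListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
	c.Errors = append(c.Errors, fmt.Sprintf("%d:%d %s", line, column, msg))
}
```

The `ProxyErrorListener` further down in this hunk is what fans a single notification out to every registered listener, which is how several listeners like this one can be attached to the same recognizer.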
+ +type ErrorListener interface { + SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) + ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) + ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) + ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) +} + +type DefaultErrorListener struct { +} + +func NewDefaultErrorListener() *DefaultErrorListener { + return new(DefaultErrorListener) +} + +func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { +} + +func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { +} + +type ConsoleErrorListener struct { + *DefaultErrorListener +} + +func NewConsoleErrorListener() *ConsoleErrorListener { + return new(ConsoleErrorListener) +} + +// Provides a default instance of {@link ConsoleErrorListener}. +var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() + +// {@inheritDoc} +// +//

+// This implementation prints messages to {@link System//err} containing the +// values of {@code line}, {@code charPositionInLine}, and {@code msg} using +// the following format.

+// +//
+// line line:charPositionInLine msg
+// 
+func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { + fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) +} + +type ProxyErrorListener struct { + *DefaultErrorListener + delegates []ErrorListener +} + +func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { + if delegates == nil { + panic("delegates is not provided") + } + l := new(ProxyErrorListener) + l.delegates = delegates + return l +} + +func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { + for _, d := range p.delegates { + d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) + } +} + +func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go index 5c0a637ba4..bfe3e9a725 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/error_strategy.go @@ -1,734 +1,734 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -type ErrorStrategy interface { - reset(Parser) - RecoverInline(Parser) Token - Recover(Parser, RecognitionException) - Sync(Parser) - InErrorRecoveryMode(Parser) bool - ReportError(Parser, RecognitionException) - ReportMatch(Parser) -} - -// This is the default implementation of {@link ANTLRErrorStrategy} used for -// error Reporting and recovery in ANTLR parsers. -type DefaultErrorStrategy struct { - errorRecoveryMode bool - lastErrorIndex int - lastErrorStates *IntervalSet -} - -var _ ErrorStrategy = &DefaultErrorStrategy{} - -func NewDefaultErrorStrategy() *DefaultErrorStrategy { - - d := new(DefaultErrorStrategy) - - // Indicates whether the error strategy is currently "recovering from an - // error". This is used to suppress Reporting multiple error messages while - // attempting to recover from a detected syntax error. - // - // @see //InErrorRecoveryMode - // - d.errorRecoveryMode = false - - // The index into the input stream where the last error occurred. - // This is used to prevent infinite loops where an error is found - // but no token is consumed during recovery...another error is found, - // ad nauseum. 
This is a failsafe mechanism to guarantee that at least - // one token/tree node is consumed for two errors. - // - d.lastErrorIndex = -1 - d.lastErrorStates = nil - return d -} - -//

The default implementation simply calls {@link //endErrorCondition} to -// ensure that the handler is not in error recovery mode.

-func (d *DefaultErrorStrategy) reset(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// This method is called to enter error recovery mode when a recognition -// exception is Reported. -// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { - d.errorRecoveryMode = true -} - -func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool { - return d.errorRecoveryMode -} - -// This method is called to leave error recovery mode after recovering from -// a recognition exception. -// -// @param recognizer -func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { - d.errorRecoveryMode = false - d.lastErrorStates = nil - d.lastErrorIndex = -1 -} - -// {@inheritDoc} -// -//

The default implementation simply calls {@link //endErrorCondition}.

-func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { - d.endErrorCondition(recognizer) -} - -// {@inheritDoc} -// -//

The default implementation returns immediately if the handler is already -// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} -// and dispatches the Reporting task based on the runtime type of {@code e} -// according to the following table.

-// -// -func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { - // if we've already Reported an error and have not Matched a token - // yet successfully, don't Report any errors. - if d.InErrorRecoveryMode(recognizer) { - return // don't Report spurious errors - } - d.beginErrorCondition(recognizer) - - switch t := e.(type) { - default: - fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) - // fmt.Println(e.stack) - recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) - case *NoViableAltException: - d.ReportNoViableAlternative(recognizer, t) - case *InputMisMatchException: - d.ReportInputMisMatch(recognizer, t) - case *FailedPredicateException: - d.ReportFailedPredicate(recognizer, t) - } -} - -// {@inheritDoc} -// -//

The default implementation reSynchronizes the parser by consuming tokens -// until we find one in the reSynchronization set--loosely the set of tokens -// that can follow the current rule.

-func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - - if d.lastErrorIndex == recognizer.GetInputStream().Index() && - d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { - // uh oh, another error at same token index and previously-Visited - // state in ATN must be a case where LT(1) is in the recovery - // token set so nothing got consumed. Consume a single token - // at least to prevent an infinite loop d is a failsafe. - recognizer.Consume() - } - d.lastErrorIndex = recognizer.GetInputStream().Index() - if d.lastErrorStates == nil { - d.lastErrorStates = NewIntervalSet() - } - d.lastErrorStates.addOne(recognizer.GetState()) - followSet := d.getErrorRecoverySet(recognizer) - d.consumeUntil(recognizer, followSet) -} - -// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure -// that the current lookahead symbol is consistent with what were expecting -// at d point in the ATN. You can call d anytime but ANTLR only -// generates code to check before subrules/loops and each iteration. -// -//

Implements Jim Idle's magic Sync mechanism in closures and optional -// subrules. E.g.,

-// -//
-// a : Sync ( stuff Sync )*
-// Sync : {consume to what can follow Sync}
-// 
-// -// At the start of a sub rule upon error, {@link //Sync} performs single -// token deletion, if possible. If it can't do that, it bails on the current -// rule and uses the default error recovery, which consumes until the -// reSynchronization set of the current rule. -// -//

If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block -// with an empty alternative), then the expected set includes what follows -// the subrule.

-// -//

During loop iteration, it consumes until it sees a token that can start a
-// sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to
-// stay in the loop as long as possible.

-// -//

ORIGINS

-// -//

Previous versions of ANTLR did a poor job of their recovery within loops.
-// A single mismatched or missing token would force the parser to bail
-// out of the entire rule surrounding the loop. So, for rule

-// -//
-// classfunc : 'class' ID '{' member* '}'
-// 
-// -// input with an extra token between members would force the parser to -// consume until it found the next class definition rather than the next -// member definition of the current class. -// -//

This functionality costs a little bit of effort because the parser has to
-// compare the token set at the start of the loop and at each iteration. If for
-// some reason speed is suffering for you, you can turn off this
-// functionality by simply overriding this method as a blank { }.
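Taking that advice literally, the override is tiny: embed the default strategy and blank out `Sync`. A sketch; the wrapper type is illustrative, not part of the runtime:

```go
package mystrategy // hypothetical package

import "github.com/antlr/antlr4/runtime/Go/antlr/v4"

// noSyncStrategy keeps all of DefaultErrorStrategy's behavior but turns the
// per-decision Sync check into a no-op, as the comment above suggests.
type noSyncStrategy struct {
	*antlr.DefaultErrorStrategy
}

func (s *noSyncStrategy) Sync(recognizer antlr.Parser) {}
```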

-func (d *DefaultErrorStrategy) Sync(recognizer Parser) { - // If already recovering, don't try to Sync - if d.InErrorRecoveryMode(recognizer) { - return - } - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - la := recognizer.GetTokenStream().LA(1) - - // try cheaper subset first might get lucky. seems to shave a wee bit off - nextTokens := recognizer.GetATN().NextTokens(s, nil) - if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { - return - } - - switch s.GetStateType() { - case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: - // Report error and recover if possible - if d.SingleTokenDeletion(recognizer) != nil { - return - } - panic(NewInputMisMatchException(recognizer)) - case ATNStatePlusLoopBack, ATNStateStarLoopBack: - d.ReportUnwantedToken(recognizer) - expecting := NewIntervalSet() - expecting.addSet(recognizer.GetExpectedTokens()) - whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) - d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) - default: - // do nothing if we can't identify the exact kind of ATN state - } -} - -// This is called by {@link //ReportError} when the exception is a -// {@link NoViableAltException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { - tokens := recognizer.GetTokenStream() - var input string - if tokens != nil { - if e.startToken.GetTokenType() == TokenEOF { - input = "" - } else { - input = tokens.GetTextFromTokens(e.startToken, e.offendingToken) - } - } else { - input = "" - } - msg := "no viable alternative at input " + d.escapeWSAndQuote(input) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This is called by {@link //ReportError} when the exception is an -// {@link InputMisMatchException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { - msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + - " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This is called by {@link //ReportError} when the exception is a -// {@link FailedPredicateException}. -// -// @see //ReportError -// -// @param recognizer the parser instance -// @param e the recognition exception -func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { - ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] - msg := "rule " + ruleName + " " + e.message - recognizer.NotifyErrorListeners(msg, e.offendingToken, e) -} - -// This method is called to Report a syntax error which requires the removal -// of a token from the input stream. At the time d method is called, the -// erroneous symbol is current {@code LT(1)} symbol and has not yet been -// removed from the input stream. When d method returns, -// {@code recognizer} is in error recovery mode. -// -//

This method is called when {@link //singleTokenDeletion} identifies -// single-token deletion as a viable recovery strategy for a mismatched -// input error.

-// -//

The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//NotifyErrorListeners}.

-// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { - if d.InErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - tokenName := d.GetTokenErrorDisplay(t) - expecting := d.GetExpectedTokens(recognizer) - msg := "extraneous input " + tokenName + " expecting " + - expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -// This method is called to Report a syntax error which requires the -// insertion of a missing token into the input stream. At the time d -// method is called, the missing token has not yet been inserted. When d -// method returns, {@code recognizer} is in error recovery mode. -// -//

This method is called when {@link //singleTokenInsertion} identifies -// single-token insertion as a viable recovery strategy for a mismatched -// input error.

-// -//

The default implementation simply returns if the handler is already in -// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to -// enter error recovery mode, followed by calling -// {@link Parser//NotifyErrorListeners}.

-// -// @param recognizer the parser instance -func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { - if d.InErrorRecoveryMode(recognizer) { - return - } - d.beginErrorCondition(recognizer) - t := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + - " at " + d.GetTokenErrorDisplay(t) - recognizer.NotifyErrorListeners(msg, t, nil) -} - -//

The default implementation attempts to recover from the mismatched input
-// by using single token insertion and deletion as described below. If the
-// recovery attempt fails, this method panics with an
-// {@link InputMisMatchException}.

-// -//

EXTRA TOKEN (single token deletion)

-// -//

{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the -// right token, however, then assume {@code LA(1)} is some extra spurious -// token and delete it. Then consume and return the next token (which was -// the {@code LA(2)} token) as the successful result of the Match operation.

-// -//

This recovery strategy is implemented by {@link -// //singleTokenDeletion}.

-// -//

MISSING TOKEN (single token insertion)

-// -//

If the current token (at {@code LA(1)}) is consistent with what could come
-// after the expected {@code LA(1)} token, then assume the token is missing
-// and use the parser's {@link TokenFactory} to create it on the fly. The
-// "insertion" is performed by returning the created token as the successful
-// result of the Match operation.

-// -//

This recovery strategy is implemented by {@link -// //singleTokenInsertion}.

-// -//

EXAMPLE

-// -//

For example, input {@code i=(3} is clearly missing the {@code ')'}. When
-// the parser returns from the nested call to {@code expr}, it will have
-// the call chain:

-// -//
-// stat &rarr expr &rarr atom
-// 
-// -// and it will be trying to Match the {@code ')'} at d point in the -// derivation: -// -//
-// => ID '=' '(' INT ')' ('+' atom)* ”
-// ^
-// 
-// -// The attempt to Match {@code ')'} will fail when it sees {@code ”} and -// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==”} -// is in the set of tokens that can follow the {@code ')'} token reference -// in rule {@code atom}. It can assume that you forgot the {@code ')'}. -func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { - // SINGLE TOKEN DELETION - MatchedSymbol := d.SingleTokenDeletion(recognizer) - if MatchedSymbol != nil { - // we have deleted the extra token. - // now, move past ttype token as if all were ok - recognizer.Consume() - return MatchedSymbol - } - // SINGLE TOKEN INSERTION - if d.SingleTokenInsertion(recognizer) { - return d.GetMissingSymbol(recognizer) - } - // even that didn't work must panic the exception - panic(NewInputMisMatchException(recognizer)) -} - -// This method implements the single-token insertion inline error recovery -// strategy. It is called by {@link //recoverInline} if the single-token -// deletion strategy fails to recover from the mismatched input. If this -// method returns {@code true}, {@code recognizer} will be in error recovery -// mode. -// -//

This method determines whether single-token insertion is viable by
-// checking if the {@code LA(1)} input symbol could be successfully Matched
-// if it were instead the {@code LA(2)} symbol. If this method returns
-// {@code true}, the caller is responsible for creating and inserting a
-// token with the correct type to produce this behavior.

-// -// @param recognizer the parser instance -// @return {@code true} if single-token insertion is a viable recovery -// strategy for the current mismatched input, otherwise {@code false} -func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { - currentSymbolType := recognizer.GetTokenStream().LA(1) - // if current token is consistent with what could come after current - // ATN state, then we know we're missing a token error recovery - // is free to conjure up and insert the missing token - atn := recognizer.GetInterpreter().atn - currentState := atn.states[recognizer.GetState()] - next := currentState.GetTransitions()[0].getTarget() - expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) - if expectingAtLL2.contains(currentSymbolType) { - d.ReportMissingToken(recognizer) - return true - } - - return false -} - -// This method implements the single-token deletion inline error recovery -// strategy. It is called by {@link //recoverInline} to attempt to recover -// from mismatched input. If this method returns nil, the parser and error -// handler state will not have changed. If this method returns non-nil, -// {@code recognizer} will not be in error recovery mode since the -// returned token was a successful Match. -// -//

If the single-token deletion is successful, this method calls
-// {@link //ReportUnwantedToken} to Report the error, followed by
-// {@link Parser//consume} to actually "delete" the extraneous token. Then,
-// before returning, {@link //ReportMatch} is called to signal a successful
-// Match.

-// -// @param recognizer the parser instance -// @return the successfully Matched {@link Token} instance if single-token -// deletion successfully recovers from the mismatched input, otherwise -// {@code nil} -func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { - NextTokenType := recognizer.GetTokenStream().LA(2) - expecting := d.GetExpectedTokens(recognizer) - if expecting.contains(NextTokenType) { - d.ReportUnwantedToken(recognizer) - // print("recoverFromMisMatchedToken deleting " \ - // + str(recognizer.GetTokenStream().LT(1)) \ - // + " since " + str(recognizer.GetTokenStream().LT(2)) \ - // + " is what we want", file=sys.stderr) - recognizer.Consume() // simply delete extra token - // we want to return the token we're actually Matching - MatchedSymbol := recognizer.GetCurrentToken() - d.ReportMatch(recognizer) // we know current token is correct - return MatchedSymbol - } - - return nil -} - -// Conjure up a missing token during error recovery. -// -// The recognizer attempts to recover from single missing -// symbols. But, actions might refer to that missing symbol. -// For example, x=ID {f($x)}. The action clearly assumes -// that there has been an identifier Matched previously and that -// $x points at that token. If that token is missing, but -// the next token in the stream is what we want we assume that -// d token is missing and we keep going. Because we -// have to return some token to replace the missing token, -// we have to conjure one up. This method gives the user control -// over the tokens returned for missing tokens. Mostly, -// you will want to create something special for identifier -// tokens. For literals such as '{' and ',', the default -// action in the parser or tree parser works. It simply creates -// a CommonToken of the appropriate type. The text will be the token. -// If you change what tokens must be created by the lexer, -// override d method to create the appropriate tokens. -func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { - currentSymbol := recognizer.GetCurrentToken() - expecting := d.GetExpectedTokens(recognizer) - expectedTokenType := expecting.first() - var tokenText string - - if expectedTokenType == TokenEOF { - tokenText = "" - } else { - ln := recognizer.GetLiteralNames() - if expectedTokenType > 0 && expectedTokenType < len(ln) { - tokenText = "" - } else { - tokenText = "" // TODO matches the JS impl - } - } - current := currentSymbol - lookback := recognizer.GetTokenStream().LT(-1) - if current.GetTokenType() == TokenEOF && lookback != nil { - current = lookback - } - - tf := recognizer.GetTokenFactory() - - return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn()) -} - -func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet { - return recognizer.GetExpectedTokens() -} - -// How should a token be displayed in an error message? The default -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. 
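The deletion-versus-insertion choice above boils down to two lookahead checks, tried in that order. A toy, self-contained rendering of the decision over plain strings; real recovery derives the `expected` and `follow` sets from ATN states rather than flat maps:

```go
package main

import "fmt"

// recoverInline sketches the two inline-recovery checks described above.
// expected: tokens the parser could match now; follow: tokens that could
// come right after the expected one.
func recoverInline(la1, la2 string, expected, follow map[string]bool) string {
	if expected[la2] {
		return "delete " + la1 + ", then match " + la2 // single-token deletion
	}
	if follow[la1] {
		return "conjure missing token, keep " + la1 // single-token insertion
	}
	return "give up: mismatched input"
}

func main() {
	expected := map[string]bool{")": true}
	follow := map[string]bool{";": true}
	fmt.Println(recoverInline("x", ")", expected, follow)) // deletion wins
	fmt.Println(recoverInline(";", "y", expected, follow)) // insertion wins
}
```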
-func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - return d.escapeWSAndQuote(s) -} - -func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - return "'" + s + "'" -} - -// Compute the error recovery set for the current rule. During -// rule invocation, the parser pushes the set of tokens that can -// follow that rule reference on the stack d amounts to -// computing FIRST of what follows the rule reference in the -// enclosing rule. See LinearApproximator.FIRST(). -// This local follow set only includes tokens -// from within the rule i.e., the FIRST computation done by -// ANTLR stops at the end of a rule. -// -// # EXAMPLE -// -// When you find a "no viable alt exception", the input is not -// consistent with any of the alternatives for rule r. The best -// thing to do is to consume tokens until you see something that -// can legally follow a call to r//or* any rule that called r. -// You don't want the exact set of viable next tokens because the -// input might just be missing a token--you might consume the -// rest of the input looking for one of the missing tokens. -// -// Consider grammar: -// -// a : '[' b ']' -// | '(' b ')' -// -// b : c '^' INT -// c : ID -// | INT -// -// At each rule invocation, the set of tokens that could follow -// that rule is pushed on a stack. Here are the various -// context-sensitive follow sets: -// -// FOLLOW(b1_in_a) = FIRST(']') = ']' -// FOLLOW(b2_in_a) = FIRST(')') = ')' -// FOLLOW(c_in_b) = FIRST('^') = '^' -// -// Upon erroneous input "[]", the call chain is -// -// a -> b -> c -// -// and, hence, the follow context stack is: -// -// depth follow set start of rule execution -// 0 a (from main()) -// 1 ']' b -// 2 '^' c -// -// Notice that ')' is not included, because b would have to have -// been called from a different context in rule a for ')' to be -// included. -// -// For error recovery, we cannot consider FOLLOW(c) -// (context-sensitive or otherwise). We need the combined set of -// all context-sensitive FOLLOW sets--the set of all tokens that -// could follow any reference in the call chain. We need to -// reSync to one of those tokens. Note that FOLLOW(c)='^' and if -// we reSync'd to that token, we'd consume until EOF. We need to -// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. -// In this case, for input "[]", LA(1) is ']' and in the set, so we would -// not consume anything. After printing an error, rule c would -// return normally. Rule b would not find the required '^' though. -// At this point, it gets a mismatched token error and panics an -// exception (since LA(1) is not in the viable following token -// set). The rule exception handler tries to recover, but finds -// the same recovery set and doesn't consume anything. Rule b -// exits normally returning to rule a. Now it finds the ']' (and -// with the successful Match exits errorRecovery mode). -// -// So, you can see that the parser walks up the call chain looking -// for the token that was a member of the recovery set. -// -// Errors are not generated in errorRecovery mode. 
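Once the combined follow set described above is known, resynchronization is just a guarded consume loop. A self-contained sketch of that loop over a toy token slice:

```go
package main

import "fmt"

const eof = "<EOF>"

// consumeUntil advances past tokens until one is in the recovery set or EOF
// is reached, mirroring the consumeUntil loop described above.
func consumeUntil(tokens []string, pos int, recovery map[string]bool) int {
	for pos < len(tokens) && tokens[pos] != eof && !recovery[tokens[pos]] {
		pos++ // token discarded during resynchronization
	}
	return pos
}

func main() {
	toks := []string{"x", "y", "]", "^", eof}
	p := consumeUntil(toks, 0, map[string]bool{"]": true, "^": true})
	fmt.Println(p, toks[p]) // 2 ]: resyncs at the first recovery-set token
}
```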
-// -// ANTLR's error recovery mechanism is based upon original ideas: -// -// "Algorithms + Data Structures = Programs" by Niklaus Wirth -// -// and -// -// "A note on error recovery in recursive descent parsers": -// http://portal.acm.org/citation.cfm?id=947902.947905 -// -// Later, Josef Grosch had some good ideas: -// -// "Efficient and Comfortable Error Recovery in Recursive Descent -// Parsers": -// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip -// -// Like Grosch I implement context-sensitive FOLLOW sets that are combined -// at run-time upon error to avoid overhead during parsing. -func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { - atn := recognizer.GetInterpreter().atn - ctx := recognizer.GetParserRuleContext() - recoverSet := NewIntervalSet() - for ctx != nil && ctx.GetInvokingState() >= 0 { - // compute what follows who invoked us - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) - recoverSet.addSet(follow) - ctx = ctx.GetParent().(ParserRuleContext) - } - recoverSet.removeOne(TokenEpsilon) - return recoverSet -} - -// Consume tokens until one Matches the given token set.// -func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { - ttype := recognizer.GetTokenStream().LA(1) - for ttype != TokenEOF && !set.contains(ttype) { - recognizer.Consume() - ttype = recognizer.GetTokenStream().LA(1) - } -} - -// -// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors -// by immediately canceling the parse operation with a -// {@link ParseCancellationException}. The implementation ensures that the -// {@link ParserRuleContext//exception} field is set for all parse tree nodes -// that were not completed prior to encountering the error. -// -//

-// This error strategy is useful in the following scenarios.

-// -//
    -//
-// - Two-stage parsing: This error strategy allows the first
-//   stage of two-stage parsing to immediately terminate if an error is
-//   encountered, and immediately fall back to the second stage. In addition to
-//   avoiding wasted work by attempting to recover from errors here, the empty
-//   implementation of {@link BailErrorStrategy//Sync} improves the performance of
-//   the first stage.
-// - Silent validation: When syntax errors are not being
-//   Reported or logged, and the parse result is simply ignored if errors occur,
-//   the {@link BailErrorStrategy} avoids wasting work on recovering from errors
-//   when the result will be ignored either way.
-// -//

-// {@code myparser.setErrorHandler(NewBailErrorStrategy())}

-// -// @see Parser//setErrorHandler(ANTLRErrorStrategy) - -type BailErrorStrategy struct { - *DefaultErrorStrategy -} - -var _ ErrorStrategy = &BailErrorStrategy{} - -func NewBailErrorStrategy() *BailErrorStrategy { - - b := new(BailErrorStrategy) - - b.DefaultErrorStrategy = NewDefaultErrorStrategy() - - return b -} - -// Instead of recovering from exception {@code e}, re-panic it wrapped -// in a {@link ParseCancellationException} so it is not caught by the -// rule func catches. Use {@link Exception//getCause()} to get the -// original {@link RecognitionException}. -func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { - context := recognizer.GetParserRuleContext() - for context != nil { - context.SetException(e) - if parent, ok := context.GetParent().(ParserRuleContext); ok { - context = parent - } else { - context = nil - } - } - panic(NewParseCancellationException()) // TODO we don't emit e properly -} - -// Make sure we don't attempt to recover inline if the parser -// successfully recovers, it won't panic an exception. -func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { - b.Recover(recognizer, NewInputMisMatchException(recognizer)) - - return nil -} - -// Make sure we don't attempt to recover from problems in subrules.// -func (b *BailErrorStrategy) Sync(recognizer Parser) { - // pass -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +type ErrorStrategy interface { + reset(Parser) + RecoverInline(Parser) Token + Recover(Parser, RecognitionException) + Sync(Parser) + InErrorRecoveryMode(Parser) bool + ReportError(Parser, RecognitionException) + ReportMatch(Parser) +} + +// This is the default implementation of {@link ANTLRErrorStrategy} used for +// error Reporting and recovery in ANTLR parsers. +type DefaultErrorStrategy struct { + errorRecoveryMode bool + lastErrorIndex int + lastErrorStates *IntervalSet +} + +var _ ErrorStrategy = &DefaultErrorStrategy{} + +func NewDefaultErrorStrategy() *DefaultErrorStrategy { + + d := new(DefaultErrorStrategy) + + // Indicates whether the error strategy is currently "recovering from an + // error". This is used to suppress Reporting multiple error messages while + // attempting to recover from a detected syntax error. + // + // @see //InErrorRecoveryMode + // + d.errorRecoveryMode = false + + // The index into the input stream where the last error occurred. + // This is used to prevent infinite loops where an error is found + // but no token is consumed during recovery...another error is found, + // ad nauseum. This is a failsafe mechanism to guarantee that at least + // one token/tree node is consumed for two errors. + // + d.lastErrorIndex = -1 + d.lastErrorStates = nil + return d +} + +//
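Since the `BailErrorStrategy` above signals failure by panicking with a `ParseCancellationException`, Go callers typically pair it with `recover`. A hedged sketch; the generated parser types, the `Expr` start rule, and the import path are placeholders, and `SetErrorHandler` is assumed from the parser API referenced above:

```go
package myparse // hypothetical package

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"

	"example.com/myproj/parser" // hypothetical generated package
)

// ParseStrict aborts on the first syntax error instead of recovering,
// converting the strategy's panic into an ordinary error.
func ParseStrict(p *parser.MyParser) (tree antlr.ParseTree, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("parse aborted: %v", r) // the wrapped cancellation
		}
	}()
	p.SetErrorHandler(antlr.NewBailErrorStrategy())
	return p.Expr(), nil // hypothetical start rule
}
```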

The default implementation simply calls {@link //endErrorCondition} to +// ensure that the handler is not in error recovery mode.

+func (d *DefaultErrorStrategy) reset(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// This method is called to enter error recovery mode when a recognition +// exception is Reported. +// +// @param recognizer the parser instance +func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { + d.errorRecoveryMode = true +} + +func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool { + return d.errorRecoveryMode +} + +// This method is called to leave error recovery mode after recovering from +// a recognition exception. +// +// @param recognizer +func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { + d.errorRecoveryMode = false + d.lastErrorStates = nil + d.lastErrorIndex = -1 +} + +// {@inheritDoc} +// +//

The default implementation simply calls {@link //endErrorCondition}.

+func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// {@inheritDoc} +// +//

The default implementation returns immediately if the handler is already +// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition} +// and dispatches the Reporting task based on the runtime type of {@code e} +// according to the following table.

+// +//
    +//
+// - {@link NoViableAltException}: Dispatches the call to
+//   {@link //ReportNoViableAlternative}
+// - {@link InputMisMatchException}: Dispatches the call to
+//   {@link //ReportInputMisMatch}
+// - {@link FailedPredicateException}: Dispatches the call to
+//   {@link //ReportFailedPredicate}
+// - All other types: calls {@link Parser//NotifyErrorListeners} to Report
+//   the exception
+//
+func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { + // if we've already Reported an error and have not Matched a token + // yet successfully, don't Report any errors. + if d.InErrorRecoveryMode(recognizer) { + return // don't Report spurious errors + } + d.beginErrorCondition(recognizer) + + switch t := e.(type) { + default: + fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) + // fmt.Println(e.stack) + recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) + case *NoViableAltException: + d.ReportNoViableAlternative(recognizer, t) + case *InputMisMatchException: + d.ReportInputMisMatch(recognizer, t) + case *FailedPredicateException: + d.ReportFailedPredicate(recognizer, t) + } +} + +// {@inheritDoc} +// +//

The default implementation reSynchronizes the parser by consuming tokens +// until we find one in the reSynchronization set--loosely the set of tokens +// that can follow the current rule.

+func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) { + + if d.lastErrorIndex == recognizer.GetInputStream().Index() && + d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { + // uh oh, another error at same token index and previously-Visited + // state in ATN must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop d is a failsafe. + recognizer.Consume() + } + d.lastErrorIndex = recognizer.GetInputStream().Index() + if d.lastErrorStates == nil { + d.lastErrorStates = NewIntervalSet() + } + d.lastErrorStates.addOne(recognizer.GetState()) + followSet := d.getErrorRecoverySet(recognizer) + d.consumeUntil(recognizer, followSet) +} + +// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure +// that the current lookahead symbol is consistent with what were expecting +// at d point in the ATN. You can call d anytime but ANTLR only +// generates code to check before subrules/loops and each iteration. +// +//

Implements Jim Idle's magic Sync mechanism in closures and optional +// subrules. E.g.,

+// +//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+// 
+// +// At the start of a sub rule upon error, {@link //Sync} performs single +// token deletion, if possible. If it can't do that, it bails on the current +// rule and uses the default error recovery, which consumes until the +// reSynchronization set of the current rule. +// +//

If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block +// with an empty alternative), then the expected set includes what follows +// the subrule.

+// +//

During loop iteration, it consumes until it sees a token that can start a
+// sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.

+// +//

ORIGINS

+// +//

Previous versions of ANTLR did a poor job of their recovery within loops. +// A single mismatched or missing token would force the parser to bail +// out of the entire rule surrounding the loop. So, for rule

+// +//
+// classfunc : 'class' ID '{' member* '}'
+// 
+// +// input with an extra token between members would force the parser to +// consume until it found the next class definition rather than the next +// member definition of the current class. +// +//

This functionality costs a little bit of effort because the parser has to +// compare the token set at the start of the loop and at each iteration. If for +// some reason speed is suffering for you, you can turn off this +// functionality by simply overriding this method with a blank { }.
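+//
+// A minimal sketch of that blank override, assuming a custom strategy type
+// (MyFastStrategy is hypothetical, not part of this file):
+//
+// type MyFastStrategy struct{ *DefaultErrorStrategy }
+//
+// func (m *MyFastStrategy) Sync(_ Parser) {} // blank body disables Sync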

+func (d *DefaultErrorStrategy) Sync(recognizer Parser) { + // If already recovering, don't try to Sync + if d.InErrorRecoveryMode(recognizer) { + return + } + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + la := recognizer.GetTokenStream().LA(1) + + // try cheaper subset first might get lucky. seems to shave a wee bit off + nextTokens := recognizer.GetATN().NextTokens(s, nil) + if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { + return + } + + switch s.GetStateType() { + case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: + // Report error and recover if possible + if d.SingleTokenDeletion(recognizer) != nil { + return + } + panic(NewInputMisMatchException(recognizer)) + case ATNStatePlusLoopBack, ATNStateStarLoopBack: + d.ReportUnwantedToken(recognizer) + expecting := NewIntervalSet() + expecting.addSet(recognizer.GetExpectedTokens()) + whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer)) + d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) + default: + // do nothing if we can't identify the exact kind of ATN state + } +} + +// This is called by {@link //ReportError} when the exception is a +// {@link NoViableAltException}. +// +// @see //ReportError +// +// @param recognizer the parser instance +// @param e the recognition exception +func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { + tokens := recognizer.GetTokenStream() + var input string + if tokens != nil { + if e.startToken.GetTokenType() == TokenEOF { + input = "" + } else { + input = tokens.GetTextFromTokens(e.startToken, e.offendingToken) + } + } else { + input = "" + } + msg := "no viable alternative at input " + d.escapeWSAndQuote(input) + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// This is called by {@link //ReportError} when the exception is an +// {@link InputMisMatchException}. +// +// @see //ReportError +// +// @param recognizer the parser instance +// @param e the recognition exception +func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { + msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) + + " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// This is called by {@link //ReportError} when the exception is a +// {@link FailedPredicateException}. +// +// @see //ReportError +// +// @param recognizer the parser instance +// @param e the recognition exception +func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { + ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] + msg := "rule " + ruleName + " " + e.message + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// This method is called to Report a syntax error which requires the removal +// of a token from the input stream. At the time d method is called, the +// erroneous symbol is current {@code LT(1)} symbol and has not yet been +// removed from the input stream. When d method returns, +// {@code recognizer} is in error recovery mode. +// +//

This method is called when {@link //singleTokenDeletion} identifies +// single-token deletion as a viable recovery strategy for a mismatched +// input error.

+// +//

The default implementation simply returns if the handler is already in +// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to +// enter error recovery mode, followed by calling +// {@link Parser//NotifyErrorListeners}.

+// +// @param recognizer the parser instance +func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { + if d.InErrorRecoveryMode(recognizer) { + return + } + d.beginErrorCondition(recognizer) + t := recognizer.GetCurrentToken() + tokenName := d.GetTokenErrorDisplay(t) + expecting := d.GetExpectedTokens(recognizer) + msg := "extraneous input " + tokenName + " expecting " + + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + recognizer.NotifyErrorListeners(msg, t, nil) +} + +// This method is called to Report a syntax error which requires the +// insertion of a missing token into the input stream. At the time d +// method is called, the missing token has not yet been inserted. When d +// method returns, {@code recognizer} is in error recovery mode. +// +//

This method is called when {@link //singleTokenInsertion} identifies +// single-token insertion as a viable recovery strategy for a mismatched +// input error.

+// +//

The default implementation simply returns if the handler is already in +// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to +// enter error recovery mode, followed by calling +// {@link Parser//NotifyErrorListeners}.

+// +// @param recognizer the parser instance +func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { + if d.InErrorRecoveryMode(recognizer) { + return + } + d.beginErrorCondition(recognizer) + t := recognizer.GetCurrentToken() + expecting := d.GetExpectedTokens(recognizer) + msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + + " at " + d.GetTokenErrorDisplay(t) + recognizer.NotifyErrorListeners(msg, t, nil) +} + +//

The default implementation attempts to recover from the mismatched input +// by using single token insertion and deletion as described below. If the +// recovery attempt fails, this method panics with an +// {@link InputMisMatchException}.

+// +//

EXTRA TOKEN (single token deletion)

+// +//

{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the +// right token, however, then assume {@code LA(1)} is some extra spurious +// token and delete it. Then consume and return the next token (which was +// the {@code LA(2)} token) as the successful result of the Match operation.

+// +//

This recovery strategy is implemented by {@link +// //singleTokenDeletion}.

+// +//

MISSING TOKEN (single token insertion)

+// +//

If current token (at {@code LA(1)}) is consistent with what could come +// after the expected {@code LA(1)} token, then assume the token is missing +// and use the parser's {@link TokenFactory} to create it on the fly. The +// "insertion" is performed by returning the created token as the successful +// result of the Match operation.

+// +//

This recovery strategy is implemented by {@link +// //singleTokenInsertion}.

+// +//

EXAMPLE

+// +//

For example, input {@code i=(3} is clearly missing the {@code ')'}. When +// the parser returns from the nested call to {@code expr}, it will have the +// call chain:

+// +//
+// stat -> expr -> atom
+// 
+// +// and it will be trying to Match the {@code ')'} at this point in the +// derivation: +// +//
+// => ID '=' '(' INT ')' ('+' atom)* ';'
+// ^
+// 
+// +// The attempt to Match {@code ')'} will fail when it sees {@code ';'} and +// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'} +// is in the set of tokens that can follow the {@code ')'} token reference +// in rule {@code atom}. It can assume that you forgot the {@code ')'}. +func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token { + // SINGLE TOKEN DELETION + MatchedSymbol := d.SingleTokenDeletion(recognizer) + if MatchedSymbol != nil { + // we have deleted the extra token. + // now, move past ttype token as if all were ok + recognizer.Consume() + return MatchedSymbol + } + // SINGLE TOKEN INSERTION + if d.SingleTokenInsertion(recognizer) { + return d.GetMissingSymbol(recognizer) + } + // even that didn't work must panic the exception + panic(NewInputMisMatchException(recognizer)) +} + +// This method implements the single-token insertion inline error recovery +// strategy. It is called by {@link //recoverInline} if the single-token +// deletion strategy fails to recover from the mismatched input. If this +// method returns {@code true}, {@code recognizer} will be in error recovery +// mode. +// +//

This method determines whether or not single-token insertion is viable by +// checking if the {@code LA(1)} input symbol could be successfully Matched +// if it were instead the {@code LA(2)} symbol. If this method returns +// {@code true}, the caller is responsible for creating and inserting a +// token with the correct type to produce this behavior.

+// +// @param recognizer the parser instance +// @return {@code true} if single-token insertion is a viable recovery +// strategy for the current mismatched input, otherwise {@code false} +func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { + currentSymbolType := recognizer.GetTokenStream().LA(1) + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token error recovery + // is free to conjure up and insert the missing token + atn := recognizer.GetInterpreter().atn + currentState := atn.states[recognizer.GetState()] + next := currentState.GetTransitions()[0].getTarget() + expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) + if expectingAtLL2.contains(currentSymbolType) { + d.ReportMissingToken(recognizer) + return true + } + + return false +} + +// This method implements the single-token deletion inline error recovery +// strategy. It is called by {@link //recoverInline} to attempt to recover +// from mismatched input. If this method returns nil, the parser and error +// handler state will not have changed. If this method returns non-nil, +// {@code recognizer} will not be in error recovery mode since the +// returned token was a successful Match. +// +//

If the single-token deletion is successful, this method calls +// {@link //ReportUnwantedToken} to Report the error, followed by +// {@link Parser//consume} to actually "delete" the extraneous token. Then, +// before returning, {@link //ReportMatch} is called to signal a successful +// Match.

+// +// @param recognizer the parser instance +// @return the successfully Matched {@link Token} instance if single-token +// deletion successfully recovers from the mismatched input, otherwise +// {@code nil} +func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token { + NextTokenType := recognizer.GetTokenStream().LA(2) + expecting := d.GetExpectedTokens(recognizer) + if expecting.contains(NextTokenType) { + d.ReportUnwantedToken(recognizer) + // print("recoverFromMisMatchedToken deleting " \ + // + str(recognizer.GetTokenStream().LT(1)) \ + // + " since " + str(recognizer.GetTokenStream().LT(2)) \ + // + " is what we want", file=sys.stderr) + recognizer.Consume() // simply delete extra token + // we want to return the token we're actually Matching + MatchedSymbol := recognizer.GetCurrentToken() + d.ReportMatch(recognizer) // we know current token is correct + return MatchedSymbol + } + + return nil +} + +// Conjure up a missing token during error recovery. +// +// The recognizer attempts to recover from single missing +// symbols. But, actions might refer to that missing symbol. +// For example, x=ID {f($x)}. The action clearly assumes +// that there has been an identifier Matched previously and that +// $x points at that token. If that token is missing, but +// the next token in the stream is what we want we assume that +// d token is missing and we keep going. Because we +// have to return some token to replace the missing token, +// we have to conjure one up. This method gives the user control +// over the tokens returned for missing tokens. Mostly, +// you will want to create something special for identifier +// tokens. For literals such as '{' and ',', the default +// action in the parser or tree parser works. It simply creates +// a CommonToken of the appropriate type. The text will be the token. +// If you change what tokens must be created by the lexer, +// override d method to create the appropriate tokens. +func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token { + currentSymbol := recognizer.GetCurrentToken() + expecting := d.GetExpectedTokens(recognizer) + expectedTokenType := expecting.first() + var tokenText string + + if expectedTokenType == TokenEOF { + tokenText = "" + } else { + ln := recognizer.GetLiteralNames() + if expectedTokenType > 0 && expectedTokenType < len(ln) { + tokenText = "" + } else { + tokenText = "" // TODO matches the JS impl + } + } + current := currentSymbol + lookback := recognizer.GetTokenStream().LT(-1) + if current.GetTokenType() == TokenEOF && lookback != nil { + current = lookback + } + + tf := recognizer.GetTokenFactory() + + return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn()) +} + +func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet { + return recognizer.GetExpectedTokens() +} + +// How should a token be displayed in an error message? The default +// is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. 
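+//
+// A hypothetical override along those lines (MyStrategy is assumed, not
+// part of this file):
+//
+// func (m *MyStrategy) GetTokenErrorDisplay(t Token) string {
+//     return t.String() // for CommonToken this dumps everything about the token
+// }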
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string { + if t == nil { + return "" + } + s := t.GetText() + if s == "" { + if t.GetTokenType() == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.GetTokenType()) + ">" + } + } + return d.escapeWSAndQuote(s) +} + +func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string { + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + return "'" + s + "'" +} + +// Compute the error recovery set for the current rule. During +// rule invocation, the parser pushes the set of tokens that can +// follow that rule reference on the stack d amounts to +// computing FIRST of what follows the rule reference in the +// enclosing rule. See LinearApproximator.FIRST(). +// This local follow set only includes tokens +// from within the rule i.e., the FIRST computation done by +// ANTLR stops at the end of a rule. +// +// # EXAMPLE +// +// When you find a "no viable alt exception", the input is not +// consistent with any of the alternatives for rule r. The best +// thing to do is to consume tokens until you see something that +// can legally follow a call to r//or* any rule that called r. +// You don't want the exact set of viable next tokens because the +// input might just be missing a token--you might consume the +// rest of the input looking for one of the missing tokens. +// +// Consider grammar: +// +// a : '[' b ']' +// | '(' b ')' +// +// b : c '^' INT +// c : ID +// | INT +// +// At each rule invocation, the set of tokens that could follow +// that rule is pushed on a stack. Here are the various +// context-sensitive follow sets: +// +// FOLLOW(b1_in_a) = FIRST(']') = ']' +// FOLLOW(b2_in_a) = FIRST(')') = ')' +// FOLLOW(c_in_b) = FIRST('^') = '^' +// +// Upon erroneous input "[]", the call chain is +// +// a -> b -> c +// +// and, hence, the follow context stack is: +// +// depth follow set start of rule execution +// 0 a (from main()) +// 1 ']' b +// 2 '^' c +// +// Notice that ')' is not included, because b would have to have +// been called from a different context in rule a for ')' to be +// included. +// +// For error recovery, we cannot consider FOLLOW(c) +// (context-sensitive or otherwise). We need the combined set of +// all context-sensitive FOLLOW sets--the set of all tokens that +// could follow any reference in the call chain. We need to +// reSync to one of those tokens. Note that FOLLOW(c)='^' and if +// we reSync'd to that token, we'd consume until EOF. We need to +// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}. +// In this case, for input "[]", LA(1) is ']' and in the set, so we would +// not consume anything. After printing an error, rule c would +// return normally. Rule b would not find the required '^' though. +// At this point, it gets a mismatched token error and panics an +// exception (since LA(1) is not in the viable following token +// set). The rule exception handler tries to recover, but finds +// the same recovery set and doesn't consume anything. Rule b +// exits normally returning to rule a. Now it finds the ']' (and +// with the successful Match exits errorRecovery mode). +// +// So, you can see that the parser walks up the call chain looking +// for the token that was a member of the recovery set. +// +// Errors are not generated in errorRecovery mode. 
+// +// ANTLR's error recovery mechanism is based upon original ideas: +// +// "Algorithms + Data Structures = Programs" by Niklaus Wirth +// +// and +// +// "A note on error recovery in recursive descent parsers": +// http://portal.acm.org/citation.cfm?id=947902.947905 +// +// Later, Josef Grosch had some good ideas: +// +// "Efficient and Comfortable Error Recovery in Recursive Descent +// Parsers": +// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip +// +// Like Grosch I implement context-sensitive FOLLOW sets that are combined +// at run-time upon error to avoid overhead during parsing. +func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { + atn := recognizer.GetInterpreter().atn + ctx := recognizer.GetParserRuleContext() + recoverSet := NewIntervalSet() + for ctx != nil && ctx.GetInvokingState() >= 0 { + // compute what follows who invoked us + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) + recoverSet.addSet(follow) + ctx = ctx.GetParent().(ParserRuleContext) + } + recoverSet.removeOne(TokenEpsilon) + return recoverSet +} + +// Consume tokens until one Matches the given token set.// +func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { + ttype := recognizer.GetTokenStream().LA(1) + for ttype != TokenEOF && !set.contains(ttype) { + recognizer.Consume() + ttype = recognizer.GetTokenStream().LA(1) + } +} + +// +// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors +// by immediately canceling the parse operation with a +// {@link ParseCancellationException}. The implementation ensures that the +// {@link ParserRuleContext//exception} field is set for all parse tree nodes +// that were not completed prior to encountering the error. +// +//

+// This error strategy is useful in the following scenarios.

+//
+//   - Two-stage parsing: This error strategy allows the first
+//     stage of two-stage parsing to immediately terminate if an error is
+//     encountered, and immediately fall back to the second stage. In addition to
+//     avoiding wasted work by attempting to recover from errors here, the empty
+//     implementation of {@link BailErrorStrategy//Sync} improves the performance of
+//     the first stage.
+//   - Silent validation: When syntax errors are not being
+//     Reported or logged, and the parse result is simply ignored if errors occur,
+//     the {@link BailErrorStrategy} avoids wasting work on recovering from errors
+//     when the result will be ignored either way.
+//

+// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
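+//
+// In this Go runtime the equivalent call is made on the generated parser,
+// roughly (hedged sketch; p stands for any generated parser value):
+//
+// p.SetErrorHandler(NewBailErrorStrategy())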

+// +// @see Parser//setErrorHandler(ANTLRErrorStrategy) + +type BailErrorStrategy struct { + *DefaultErrorStrategy +} + +var _ ErrorStrategy = &BailErrorStrategy{} + +func NewBailErrorStrategy() *BailErrorStrategy { + + b := new(BailErrorStrategy) + + b.DefaultErrorStrategy = NewDefaultErrorStrategy() + + return b +} + +// Instead of recovering from exception {@code e}, re-panic it wrapped +// in a {@link ParseCancellationException} so it is not caught by the +// rule func catches. Use {@link Exception//getCause()} to get the +// original {@link RecognitionException}. +func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { + context := recognizer.GetParserRuleContext() + for context != nil { + context.SetException(e) + if parent, ok := context.GetParent().(ParserRuleContext); ok { + context = parent + } else { + context = nil + } + } + panic(NewParseCancellationException()) // TODO we don't emit e properly +} + +// Make sure we don't attempt to recover inline if the parser +// successfully recovers, it won't panic an exception. +func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { + b.Recover(recognizer, NewInputMisMatchException(recognizer)) + + return nil +} + +// Make sure we don't attempt to recover from problems in subrules.// +func (b *BailErrorStrategy) Sync(recognizer Parser) { + // pass +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go index 3954c13782..acba949e18 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/errors.go @@ -1,238 +1,238 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just -// 3 kinds of errors: prediction errors, failed predicate errors, and -// mismatched input errors. In each case, the parser knows where it is -// in the input, where it is in the ATN, the rule invocation stack, -// and what kind of problem occurred. - -type RecognitionException interface { - GetOffendingToken() Token - GetMessage() string - GetInputStream() IntStream -} - -type BaseRecognitionException struct { - message string - recognizer Recognizer - offendingToken Token - offendingState int - ctx RuleContext - input IntStream -} - -func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { - - // todo - // Error.call(this) - // - // if (!!Error.captureStackTrace) { - // Error.captureStackTrace(this, RecognitionException) - // } else { - // stack := NewError().stack - // } - // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int - - t := new(BaseRecognitionException) - - t.message = message - t.recognizer = recognizer - t.input = input - t.ctx = ctx - // The current {@link Token} when an error occurred. Since not all streams - // support accessing symbols by index, we have to track the {@link Token} - // instance itself. - t.offendingToken = nil - // Get the ATN state number the parser was in at the time the error - // occurred. For {@link NoViableAltException} and - // {@link LexerNoViableAltException} exceptions, this is the - // {@link DecisionState} number. For others, it is the state whose outgoing - // edge we couldn't Match. 
- t.offendingState = -1 - if t.recognizer != nil { - t.offendingState = t.recognizer.GetState() - } - - return t -} - -func (b *BaseRecognitionException) GetMessage() string { - return b.message -} - -func (b *BaseRecognitionException) GetOffendingToken() Token { - return b.offendingToken -} - -func (b *BaseRecognitionException) GetInputStream() IntStream { - return b.input -} - -//

If the state number is not known, b method returns -1.

- -// Gets the set of input symbols which could potentially follow the -// previously Matched symbol at the time b exception was panicn. -// -//

If the set of expected tokens is not known and could not be computed, -// b method returns {@code nil}.

-// -// @return The set of token types that could potentially follow the current -// state in the ATN, or {@code nil} if the information is not available. -// / -func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { - if b.recognizer != nil { - return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) - } - - return nil -} - -func (b *BaseRecognitionException) String() string { - return b.message -} - -type LexerNoViableAltException struct { - *BaseRecognitionException - - startIndex int - deadEndConfigs ATNConfigSet -} - -func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { - - l := new(LexerNoViableAltException) - - l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) - - l.startIndex = startIndex - l.deadEndConfigs = deadEndConfigs - - return l -} - -func (l *LexerNoViableAltException) String() string { - symbol := "" - if l.startIndex >= 0 && l.startIndex < l.input.Size() { - symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) - } - return "LexerNoViableAltException" + symbol -} - -type NoViableAltException struct { - *BaseRecognitionException - - startToken Token - offendingToken Token - ctx ParserRuleContext - deadEndConfigs ATNConfigSet -} - -// Indicates that the parser could not decide which of two or more paths -// to take based upon the remaining input. It tracks the starting token -// of the offending input and also knows where the parser was -// in the various paths when the error. Reported by ReportNoViableAlternative() -func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { - - if ctx == nil { - ctx = recognizer.GetParserRuleContext() - } - - if offendingToken == nil { - offendingToken = recognizer.GetCurrentToken() - } - - if startToken == nil { - startToken = recognizer.GetCurrentToken() - } - - if input == nil { - input = recognizer.GetInputStream().(TokenStream) - } - - n := new(NoViableAltException) - n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) - - // Which configurations did we try at input.Index() that couldn't Match - // input.LT(1)?// - n.deadEndConfigs = deadEndConfigs - // The token object at the start index the input stream might - // not be buffering tokens so get a reference to it. (At the - // time the error occurred, of course the stream needs to keep a - // buffer all of the tokens but later we might not have access to those.) - n.startToken = startToken - n.offendingToken = offendingToken - - return n -} - -type InputMisMatchException struct { - *BaseRecognitionException -} - -// This signifies any kind of mismatched input exceptions such as -// when the current input does not Match the expected token. -func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { - - i := new(InputMisMatchException) - i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - i.offendingToken = recognizer.GetCurrentToken() - - return i - -} - -// A semantic predicate failed during validation. Validation of predicates -// occurs when normally parsing the alternative just like Matching a token. -// Disambiguating predicate evaluation occurs when we test a predicate during -// prediction. 
- -type FailedPredicateException struct { - *BaseRecognitionException - - ruleIndex int - predicateIndex int - predicate string -} - -func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { - - f := new(FailedPredicateException) - - f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) - - s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] - trans := s.GetTransitions()[0] - if trans2, ok := trans.(*PredicateTransition); ok { - f.ruleIndex = trans2.ruleIndex - f.predicateIndex = trans2.predIndex - } else { - f.ruleIndex = 0 - f.predicateIndex = 0 - } - f.predicate = predicate - f.offendingToken = recognizer.GetCurrentToken() - - return f -} - -func (f *FailedPredicateException) formatMessage(predicate, message string) string { - if message != "" { - return message - } - - return "failed predicate: {" + predicate + "}?" -} - -type ParseCancellationException struct { -} - -func NewParseCancellationException() *ParseCancellationException { - // Error.call(this) - // Error.captureStackTrace(this, ParseCancellationException) - return new(ParseCancellationException) -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just +// 3 kinds of errors: prediction errors, failed predicate errors, and +// mismatched input errors. In each case, the parser knows where it is +// in the input, where it is in the ATN, the rule invocation stack, +// and what kind of problem occurred. + +type RecognitionException interface { + GetOffendingToken() Token + GetMessage() string + GetInputStream() IntStream +} + +type BaseRecognitionException struct { + message string + recognizer Recognizer + offendingToken Token + offendingState int + ctx RuleContext + input IntStream +} + +func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { + + // todo + // Error.call(this) + // + // if (!!Error.captureStackTrace) { + // Error.captureStackTrace(this, RecognitionException) + // } else { + // stack := NewError().stack + // } + // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int + + t := new(BaseRecognitionException) + + t.message = message + t.recognizer = recognizer + t.input = input + t.ctx = ctx + // The current {@link Token} when an error occurred. Since not all streams + // support accessing symbols by index, we have to track the {@link Token} + // instance itself. + t.offendingToken = nil + // Get the ATN state number the parser was in at the time the error + // occurred. For {@link NoViableAltException} and + // {@link LexerNoViableAltException} exceptions, this is the + // {@link DecisionState} number. For others, it is the state whose outgoing + // edge we couldn't Match. + t.offendingState = -1 + if t.recognizer != nil { + t.offendingState = t.recognizer.GetState() + } + + return t +} + +func (b *BaseRecognitionException) GetMessage() string { + return b.message +} + +func (b *BaseRecognitionException) GetOffendingToken() Token { + return b.offendingToken +} + +func (b *BaseRecognitionException) GetInputStream() IntStream { + return b.input +} + +//

If the state number is not known, this method returns -1.

+ +// Gets the set of input symbols which could potentially follow the +// previously Matched symbol at the time this exception was raised. +// +//

If the set of expected tokens is not known and could not be computed, +// this method returns {@code nil}.

+// +// @return The set of token types that could potentially follow the current +// state in the ATN, or {@code nil} if the information is not available. +// / +func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { + if b.recognizer != nil { + return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) + } + + return nil +} + +func (b *BaseRecognitionException) String() string { + return b.message +} + +type LexerNoViableAltException struct { + *BaseRecognitionException + + startIndex int + deadEndConfigs ATNConfigSet +} + +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { + + l := new(LexerNoViableAltException) + + l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) + + l.startIndex = startIndex + l.deadEndConfigs = deadEndConfigs + + return l +} + +func (l *LexerNoViableAltException) String() string { + symbol := "" + if l.startIndex >= 0 && l.startIndex < l.input.Size() { + symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) + } + return "LexerNoViableAltException" + symbol +} + +type NoViableAltException struct { + *BaseRecognitionException + + startToken Token + offendingToken Token + ctx ParserRuleContext + deadEndConfigs ATNConfigSet +} + +// Indicates that the parser could not decide which of two or more paths +// to take based upon the remaining input. It tracks the starting token +// of the offending input and also knows where the parser was +// in the various paths when the error. Reported by ReportNoViableAlternative() +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { + + if ctx == nil { + ctx = recognizer.GetParserRuleContext() + } + + if offendingToken == nil { + offendingToken = recognizer.GetCurrentToken() + } + + if startToken == nil { + startToken = recognizer.GetCurrentToken() + } + + if input == nil { + input = recognizer.GetInputStream().(TokenStream) + } + + n := new(NoViableAltException) + n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) + + // Which configurations did we try at input.Index() that couldn't Match + // input.LT(1)?// + n.deadEndConfigs = deadEndConfigs + // The token object at the start index the input stream might + // not be buffering tokens so get a reference to it. (At the + // time the error occurred, of course the stream needs to keep a + // buffer all of the tokens but later we might not have access to those.) + n.startToken = startToken + n.offendingToken = offendingToken + + return n +} + +type InputMisMatchException struct { + *BaseRecognitionException +} + +// This signifies any kind of mismatched input exceptions such as +// when the current input does not Match the expected token. +func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { + + i := new(InputMisMatchException) + i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + i.offendingToken = recognizer.GetCurrentToken() + + return i + +} + +// A semantic predicate failed during validation. Validation of predicates +// occurs when normally parsing the alternative just like Matching a token. +// Disambiguating predicate evaluation occurs when we test a predicate during +// prediction. 
+ +type FailedPredicateException struct { + *BaseRecognitionException + + ruleIndex int + predicateIndex int + predicate string +} + +func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { + + f := new(FailedPredicateException) + + f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + trans := s.GetTransitions()[0] + if trans2, ok := trans.(*PredicateTransition); ok { + f.ruleIndex = trans2.ruleIndex + f.predicateIndex = trans2.predIndex + } else { + f.ruleIndex = 0 + f.predicateIndex = 0 + } + f.predicate = predicate + f.offendingToken = recognizer.GetCurrentToken() + + return f +} + +func (f *FailedPredicateException) formatMessage(predicate, message string) string { + if message != "" { + return message + } + + return "failed predicate: {" + predicate + "}?" +} + +type ParseCancellationException struct { +} + +func NewParseCancellationException() *ParseCancellationException { + // Error.call(this) + // Error.captureStackTrace(this, ParseCancellationException) + return new(ParseCancellationException) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go index bd6ad5efe3..c044270787 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/file_stream.go @@ -1,49 +1,49 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "bytes" - "io" - "os" -) - -// This is an InputStream that is loaded from a file all at once -// when you construct the object. - -type FileStream struct { - *InputStream - - filename string -} - -func NewFileStream(fileName string) (*FileStream, error) { - - buf := bytes.NewBuffer(nil) - - f, err := os.Open(fileName) - if err != nil { - return nil, err - } - defer f.Close() - _, err = io.Copy(buf, f) - if err != nil { - return nil, err - } - - fs := new(FileStream) - - fs.filename = fileName - s := string(buf.Bytes()) - - fs.InputStream = NewInputStream(s) - - return fs, nil - -} - -func (f *FileStream) GetSourceName() string { - return f.filename -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "io" + "os" +) + +// This is an InputStream that is loaded from a file all at once +// when you construct the object. 
+ +type FileStream struct { + *InputStream + + filename string +} + +func NewFileStream(fileName string) (*FileStream, error) { + + buf := bytes.NewBuffer(nil) + + f, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer f.Close() + _, err = io.Copy(buf, f) + if err != nil { + return nil, err + } + + fs := new(FileStream) + + fs.filename = fileName + s := string(buf.Bytes()) + + fs.InputStream = NewInputStream(s) + + return fs, nil + +} + +func (f *FileStream) GetSourceName() string { + return f.filename +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go index a8b889cedb..22bd585aa6 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/input_stream.go @@ -1,113 +1,113 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type InputStream struct { - name string - index int - data []rune - size int -} - -func NewInputStream(data string) *InputStream { - - is := new(InputStream) - - is.name = "" - is.index = 0 - is.data = []rune(data) - is.size = len(is.data) // number of runes - - return is -} - -func (is *InputStream) reset() { - is.index = 0 -} - -func (is *InputStream) Consume() { - if is.index >= is.size { - // assert is.LA(1) == TokenEOF - panic("cannot consume EOF") - } - is.index++ -} - -func (is *InputStream) LA(offset int) int { - - if offset == 0 { - return 0 // nil - } - if offset < 0 { - offset++ // e.g., translate LA(-1) to use offset=0 - } - pos := is.index + offset - 1 - - if pos < 0 || pos >= is.size { // invalid - return TokenEOF - } - - return int(is.data[pos]) -} - -func (is *InputStream) LT(offset int) int { - return is.LA(offset) -} - -func (is *InputStream) Index() int { - return is.index -} - -func (is *InputStream) Size() int { - return is.size -} - -// mark/release do nothing we have entire buffer -func (is *InputStream) Mark() int { - return -1 -} - -func (is *InputStream) Release(marker int) { -} - -func (is *InputStream) Seek(index int) { - if index <= is.index { - is.index = index // just jump don't update stream state (line,...) - return - } - // seek forward - is.index = intMin(index, is.size) -} - -func (is *InputStream) GetText(start int, stop int) string { - if stop >= is.size { - stop = is.size - 1 - } - if start >= is.size { - return "" - } - - return string(is.data[start : stop+1]) -} - -func (is *InputStream) GetTextFromTokens(start, stop Token) string { - if start != nil && stop != nil { - return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) - } - - return "" -} - -func (is *InputStream) GetTextFromInterval(i *Interval) string { - return is.GetText(i.Start, i.Stop) -} - -func (*InputStream) GetSourceName() string { - return "Obtained from string" -} - -func (is *InputStream) String() string { - return string(is.data) -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +type InputStream struct { + name string + index int + data []rune + size int +} + +func NewInputStream(data string) *InputStream { + + is := new(InputStream) + + is.name = "" + is.index = 0 + is.data = []rune(data) + is.size = len(is.data) // number of runes + + return is +} + +func (is *InputStream) reset() { + is.index = 0 +} + +func (is *InputStream) Consume() { + if is.index >= is.size { + // assert is.LA(1) == TokenEOF + panic("cannot consume EOF") + } + is.index++ +} + +func (is *InputStream) LA(offset int) int { + + if offset == 0 { + return 0 // nil + } + if offset < 0 { + offset++ // e.g., translate LA(-1) to use offset=0 + } + pos := is.index + offset - 1 + + if pos < 0 || pos >= is.size { // invalid + return TokenEOF + } + + return int(is.data[pos]) +} + +func (is *InputStream) LT(offset int) int { + return is.LA(offset) +} + +func (is *InputStream) Index() int { + return is.index +} + +func (is *InputStream) Size() int { + return is.size +} + +// mark/release do nothing we have entire buffer +func (is *InputStream) Mark() int { + return -1 +} + +func (is *InputStream) Release(marker int) { +} + +func (is *InputStream) Seek(index int) { + if index <= is.index { + is.index = index // just jump don't update stream state (line,...) + return + } + // seek forward + is.index = intMin(index, is.size) +} + +func (is *InputStream) GetText(start int, stop int) string { + if stop >= is.size { + stop = is.size - 1 + } + if start >= is.size { + return "" + } + + return string(is.data[start : stop+1]) +} + +func (is *InputStream) GetTextFromTokens(start, stop Token) string { + if start != nil && stop != nil { + return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex())) + } + + return "" +} + +func (is *InputStream) GetTextFromInterval(i *Interval) string { + return is.GetText(i.Start, i.Stop) +} + +func (*InputStream) GetSourceName() string { + return "Obtained from string" +} + +func (is *InputStream) String() string { + return string(is.data) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go index 4778878bd0..e11e175be3 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/int_stream.go @@ -1,16 +1,16 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type IntStream interface { - Consume() - LA(int) int - Mark() int - Release(marker int) - Index() int - Seek(index int) - Size() int - GetSourceName() string -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type IntStream interface { + Consume() + LA(int) int + Mark() int + Release(marker int) + Index() int + Seek(index int) + Size() int + GetSourceName() string +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go index c1e155e818..34dc4c867c 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/interval_set.go @@ -1,312 +1,312 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
-// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" - "strings" -) - -type Interval struct { - Start int - Stop int -} - -/* stop is not included! */ -func NewInterval(start, stop int) *Interval { - i := new(Interval) - - i.Start = start - i.Stop = stop - return i -} - -func (i *Interval) Contains(item int) bool { - return item >= i.Start && item < i.Stop -} - -func (i *Interval) String() string { - if i.Start == i.Stop-1 { - return strconv.Itoa(i.Start) - } - - return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) -} - -func (i *Interval) length() int { - return i.Stop - i.Start -} - -type IntervalSet struct { - intervals []*Interval - readOnly bool -} - -func NewIntervalSet() *IntervalSet { - - i := new(IntervalSet) - - i.intervals = nil - i.readOnly = false - - return i -} - -func (i *IntervalSet) first() int { - if len(i.intervals) == 0 { - return TokenInvalidType - } - - return i.intervals[0].Start -} - -func (i *IntervalSet) addOne(v int) { - i.addInterval(NewInterval(v, v+1)) -} - -func (i *IntervalSet) addRange(l, h int) { - i.addInterval(NewInterval(l, h+1)) -} - -func (i *IntervalSet) addInterval(v *Interval) { - if i.intervals == nil { - i.intervals = make([]*Interval, 0) - i.intervals = append(i.intervals, v) - } else { - // find insert pos - for k, interval := range i.intervals { - // distinct range -> insert - if v.Stop < interval.Start { - i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) - return - } else if v.Stop == interval.Start { - i.intervals[k].Start = v.Start - return - } else if v.Start <= interval.Stop { - i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) - - // if not applying to end, merge potential overlaps - if k < len(i.intervals)-1 { - l := i.intervals[k] - r := i.intervals[k+1] - // if r contained in l - if l.Stop >= r.Stop { - i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) - } else if l.Stop >= r.Start { // partial overlap - i.intervals[k] = NewInterval(l.Start, r.Stop) - i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) 
- } - } - return - } - } - // greater than any exiting - i.intervals = append(i.intervals, v) - } -} - -func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { - if other.intervals != nil { - for k := 0; k < len(other.intervals); k++ { - i2 := other.intervals[k] - i.addInterval(NewInterval(i2.Start, i2.Stop)) - } - } - return i -} - -func (i *IntervalSet) complement(start int, stop int) *IntervalSet { - result := NewIntervalSet() - result.addInterval(NewInterval(start, stop+1)) - for j := 0; j < len(i.intervals); j++ { - result.removeRange(i.intervals[j]) - } - return result -} - -func (i *IntervalSet) contains(item int) bool { - if i.intervals == nil { - return false - } - for k := 0; k < len(i.intervals); k++ { - if i.intervals[k].Contains(item) { - return true - } - } - return false -} - -func (i *IntervalSet) length() int { - len := 0 - - for _, v := range i.intervals { - len += v.length() - } - - return len -} - -func (i *IntervalSet) removeRange(v *Interval) { - if v.Start == v.Stop-1 { - i.removeOne(v.Start) - } else if i.intervals != nil { - k := 0 - for n := 0; n < len(i.intervals); n++ { - ni := i.intervals[k] - // intervals are ordered - if v.Stop <= ni.Start { - return - } else if v.Start > ni.Start && v.Stop < ni.Stop { - i.intervals[k] = NewInterval(ni.Start, v.Start) - x := NewInterval(v.Stop, ni.Stop) - // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) - return - } else if v.Start <= ni.Start && v.Stop >= ni.Stop { - // i.intervals.splice(k, 1) - i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) - k = k - 1 // need another pass - } else if v.Start < ni.Stop { - i.intervals[k] = NewInterval(ni.Start, v.Start) - } else if v.Stop < ni.Stop { - i.intervals[k] = NewInterval(v.Stop, ni.Stop) - } - k++ - } - } -} - -func (i *IntervalSet) removeOne(v int) { - if i.intervals != nil { - for k := 0; k < len(i.intervals); k++ { - ki := i.intervals[k] - // intervals i ordered - if v < ki.Start { - return - } else if v == ki.Start && v == ki.Stop-1 { - // i.intervals.splice(k, 1) - i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) - return - } else if v == ki.Start { - i.intervals[k] = NewInterval(ki.Start+1, ki.Stop) - return - } else if v == ki.Stop-1 { - i.intervals[k] = NewInterval(ki.Start, ki.Stop-1) - return - } else if v < ki.Stop-1 { - x := NewInterval(ki.Start, v) - ki.Start = v + 1 - // i.intervals.splice(k, 0, x) - i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) 
- return - } - } - } -} - -func (i *IntervalSet) String() string { - return i.StringVerbose(nil, nil, false) -} - -func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string { - - if i.intervals == nil { - return "{}" - } else if literalNames != nil || symbolicNames != nil { - return i.toTokenString(literalNames, symbolicNames) - } else if elemsAreChar { - return i.toCharString() - } - - return i.toIndexString() -} - -func (i *IntervalSet) GetIntervals() []*Interval { - return i.intervals -} - -func (i *IntervalSet) toCharString() string { - names := make([]string, len(i.intervals)) - - var sb strings.Builder - - for j := 0; j < len(i.intervals); j++ { - v := i.intervals[j] - if v.Stop == v.Start+1 { - if v.Start == TokenEOF { - names = append(names, "") - } else { - sb.WriteByte('\'') - sb.WriteRune(rune(v.Start)) - sb.WriteByte('\'') - names = append(names, sb.String()) - sb.Reset() - } - } else { - sb.WriteByte('\'') - sb.WriteRune(rune(v.Start)) - sb.WriteString("'..'") - sb.WriteRune(rune(v.Stop - 1)) - sb.WriteByte('\'') - names = append(names, sb.String()) - sb.Reset() - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) toIndexString() string { - - names := make([]string, 0) - for j := 0; j < len(i.intervals); j++ { - v := i.intervals[j] - if v.Stop == v.Start+1 { - if v.Start == TokenEOF { - names = append(names, "") - } else { - names = append(names, strconv.Itoa(v.Start)) - } - } else { - names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1)) - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string { - names := make([]string, 0) - for _, v := range i.intervals { - for j := v.Start; j < v.Stop; j++ { - names = append(names, i.elementName(literalNames, symbolicNames, j)) - } - } - if len(names) > 1 { - return "{" + strings.Join(names, ", ") + "}" - } - - return names[0] -} - -func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string { - if a == TokenEOF { - return "" - } else if a == TokenEpsilon { - return "" - } else { - if a < len(literalNames) && literalNames[a] != "" { - return literalNames[a] - } - - return symbolicNames[a] - } -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type Interval struct { + Start int + Stop int +} + +/* stop is not included! */ +func NewInterval(start, stop int) *Interval { + i := new(Interval) + + i.Start = start + i.Stop = stop + return i +} + +func (i *Interval) Contains(item int) bool { + return item >= i.Start && item < i.Stop +} + +func (i *Interval) String() string { + if i.Start == i.Stop-1 { + return strconv.Itoa(i.Start) + } + + return strconv.Itoa(i.Start) + ".." 
+ strconv.Itoa(i.Stop-1) +} + +func (i *Interval) length() int { + return i.Stop - i.Start +} + +type IntervalSet struct { + intervals []*Interval + readOnly bool +} + +func NewIntervalSet() *IntervalSet { + + i := new(IntervalSet) + + i.intervals = nil + i.readOnly = false + + return i +} + +func (i *IntervalSet) first() int { + if len(i.intervals) == 0 { + return TokenInvalidType + } + + return i.intervals[0].Start +} + +func (i *IntervalSet) addOne(v int) { + i.addInterval(NewInterval(v, v+1)) +} + +func (i *IntervalSet) addRange(l, h int) { + i.addInterval(NewInterval(l, h+1)) +} + +func (i *IntervalSet) addInterval(v *Interval) { + if i.intervals == nil { + i.intervals = make([]*Interval, 0) + i.intervals = append(i.intervals, v) + } else { + // find insert pos + for k, interval := range i.intervals { + // distinct range -> insert + if v.Stop < interval.Start { + i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) + return + } else if v.Stop == interval.Start { + i.intervals[k].Start = v.Start + return + } else if v.Start <= interval.Stop { + i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) + + // if not applying to end, merge potential overlaps + if k < len(i.intervals)-1 { + l := i.intervals[k] + r := i.intervals[k+1] + // if r contained in l + if l.Stop >= r.Stop { + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) + } else if l.Stop >= r.Start { // partial overlap + i.intervals[k] = NewInterval(l.Start, r.Stop) + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) + } + } + return + } + } + // greater than any exiting + i.intervals = append(i.intervals, v) + } +} + +func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { + if other.intervals != nil { + for k := 0; k < len(other.intervals); k++ { + i2 := other.intervals[k] + i.addInterval(NewInterval(i2.Start, i2.Stop)) + } + } + return i +} + +func (i *IntervalSet) complement(start int, stop int) *IntervalSet { + result := NewIntervalSet() + result.addInterval(NewInterval(start, stop+1)) + for j := 0; j < len(i.intervals); j++ { + result.removeRange(i.intervals[j]) + } + return result +} + +func (i *IntervalSet) contains(item int) bool { + if i.intervals == nil { + return false + } + for k := 0; k < len(i.intervals); k++ { + if i.intervals[k].Contains(item) { + return true + } + } + return false +} + +func (i *IntervalSet) length() int { + len := 0 + + for _, v := range i.intervals { + len += v.length() + } + + return len +} + +func (i *IntervalSet) removeRange(v *Interval) { + if v.Start == v.Stop-1 { + i.removeOne(v.Start) + } else if i.intervals != nil { + k := 0 + for n := 0; n < len(i.intervals); n++ { + ni := i.intervals[k] + // intervals are ordered + if v.Stop <= ni.Start { + return + } else if v.Start > ni.Start && v.Stop < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + x := NewInterval(v.Stop, ni.Stop) + // i.intervals.splice(k, 0, x) + i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + return + } else if v.Start <= ni.Start && v.Stop >= ni.Stop { + // i.intervals.splice(k, 1) + i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) 
+ k = k - 1 // need another pass + } else if v.Start < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + } else if v.Stop < ni.Stop { + i.intervals[k] = NewInterval(v.Stop, ni.Stop) + } + k++ + } + } +} + +func (i *IntervalSet) removeOne(v int) { + if i.intervals != nil { + for k := 0; k < len(i.intervals); k++ { + ki := i.intervals[k] + // intervals i ordered + if v < ki.Start { + return + } else if v == ki.Start && v == ki.Stop-1 { + // i.intervals.splice(k, 1) + i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) + return + } else if v == ki.Start { + i.intervals[k] = NewInterval(ki.Start+1, ki.Stop) + return + } else if v == ki.Stop-1 { + i.intervals[k] = NewInterval(ki.Start, ki.Stop-1) + return + } else if v < ki.Stop-1 { + x := NewInterval(ki.Start, v) + ki.Start = v + 1 + // i.intervals.splice(k, 0, x) + i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) + return + } + } + } +} + +func (i *IntervalSet) String() string { + return i.StringVerbose(nil, nil, false) +} + +func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string { + + if i.intervals == nil { + return "{}" + } else if literalNames != nil || symbolicNames != nil { + return i.toTokenString(literalNames, symbolicNames) + } else if elemsAreChar { + return i.toCharString() + } + + return i.toIndexString() +} + +func (i *IntervalSet) GetIntervals() []*Interval { + return i.intervals +} + +func (i *IntervalSet) toCharString() string { + names := make([]string, len(i.intervals)) + + var sb strings.Builder + + for j := 0; j < len(i.intervals); j++ { + v := i.intervals[j] + if v.Stop == v.Start+1 { + if v.Start == TokenEOF { + names = append(names, "") + } else { + sb.WriteByte('\'') + sb.WriteRune(rune(v.Start)) + sb.WriteByte('\'') + names = append(names, sb.String()) + sb.Reset() + } + } else { + sb.WriteByte('\'') + sb.WriteRune(rune(v.Start)) + sb.WriteString("'..'") + sb.WriteRune(rune(v.Stop - 1)) + sb.WriteByte('\'') + names = append(names, sb.String()) + sb.Reset() + } + } + if len(names) > 1 { + return "{" + strings.Join(names, ", ") + "}" + } + + return names[0] +} + +func (i *IntervalSet) toIndexString() string { + + names := make([]string, 0) + for j := 0; j < len(i.intervals); j++ { + v := i.intervals[j] + if v.Stop == v.Start+1 { + if v.Start == TokenEOF { + names = append(names, "") + } else { + names = append(names, strconv.Itoa(v.Start)) + } + } else { + names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1)) + } + } + if len(names) > 1 { + return "{" + strings.Join(names, ", ") + "}" + } + + return names[0] +} + +func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string { + names := make([]string, 0) + for _, v := range i.intervals { + for j := v.Start; j < v.Stop; j++ { + names = append(names, i.elementName(literalNames, symbolicNames, j)) + } + } + if len(names) > 1 { + return "{" + strings.Join(names, ", ") + "}" + } + + return names[0] +} + +func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string { + if a == TokenEOF { + return "" + } else if a == TokenEpsilon { + return "" + } else { + if a < len(literalNames) && literalNames[a] != "" { + return literalNames[a] + } + + return symbolicNames[a] + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go index e5a74f0c6c..921db4f533 100644 --- 
a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/jcollect.go @@ -1,198 +1,198 @@ -package antlr - -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -import ( - "sort" -) - -// Collectable is an interface that a struct should implement if it is to be -// usable as a key in these collections. -type Collectable[T any] interface { - Hash() int - Equals(other Collectable[T]) bool -} - -type Comparator[T any] interface { - Hash1(o T) int - Equals2(T, T) bool -} - -// JStore implements a container that allows the use of a struct to calculate the key -// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just -// serve the needs of the ANTLR Go runtime. -// -// For ease of porting the logic of the runtime from the master target (Java), this collection -// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() -// function as the key. The values are stored in a standard go map which internally is a form of hashmap -// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with -// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't -// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and -// we understand the requirements, then this is fine - this is not a general purpose collection. -type JStore[T any, C Comparator[T]] struct { - store map[int][]T - len int - comparator Comparator[T] -} - -func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] { - - if comparator == nil { - panic("comparator cannot be nil") - } - - s := &JStore[T, C]{ - store: make(map[int][]T, 1), - comparator: comparator, - } - return s -} - -// Put will store given value in the collection. Note that the key for storage is generated from -// the value itself - this is specifically because that is what ANTLR needs - this would not be useful -// as any kind of general collection. -// -// If the key has a hash conflict, then the value will be added to the slice of values associated with the -// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is -// tested by calling the equals() method on the key. -// -// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true -// -// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. -func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn - - kh := s.comparator.Hash1(value) - - for _, v1 := range s.store[kh] { - if s.comparator.Equals2(value, v1) { - return v1, true - } - } - s.store[kh] = append(s.store[kh], value) - s.len++ - return value, false -} - -// Get will return the value associated with the key - the type of the key is the same type as the value -// which would not generally be useful, but this is a specific thing for ANTLR where the key is -// generated using the object we are going to store. 
-func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn - - kh := s.comparator.Hash1(key) - - for _, v := range s.store[kh] { - if s.comparator.Equals2(key, v) { - return v, true - } - } - return key, false -} - -// Contains returns true if the given key is present in the store -func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn - - _, present := s.Get(key) - return present -} - -func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { - vs := make([]T, 0, len(s.store)) - for _, v := range s.store { - vs = append(vs, v...) - } - sort.Slice(vs, func(i, j int) bool { - return less(vs[i], vs[j]) - }) - - return vs -} - -func (s *JStore[T, C]) Each(f func(T) bool) { - for _, e := range s.store { - for _, v := range e { - f(v) - } - } -} - -func (s *JStore[T, C]) Len() int { - return s.len -} - -func (s *JStore[T, C]) Values() []T { - vs := make([]T, 0, len(s.store)) - for _, e := range s.store { - for _, v := range e { - vs = append(vs, v) - } - } - return vs -} - -type entry[K, V any] struct { - key K - val V -} - -type JMap[K, V any, C Comparator[K]] struct { - store map[int][]*entry[K, V] - len int - comparator Comparator[K] -} - -func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] { - return &JMap[K, V, C]{ - store: make(map[int][]*entry[K, V], 1), - comparator: comparator, - } -} - -func (m *JMap[K, V, C]) Put(key K, val V) { - kh := m.comparator.Hash1(key) - - m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) - m.len++ -} - -func (m *JMap[K, V, C]) Values() []V { - vs := make([]V, 0, len(m.store)) - for _, e := range m.store { - for _, v := range e { - vs = append(vs, v.val) - } - } - return vs -} - -func (m *JMap[K, V, C]) Get(key K) (V, bool) { - - var none V - kh := m.comparator.Hash1(key) - for _, e := range m.store[kh] { - if m.comparator.Equals2(e.key, key) { - return e.val, true - } - } - return none, false -} - -func (m *JMap[K, V, C]) Len() int { - return len(m.store) -} - -func (m *JMap[K, V, C]) Delete(key K) { - kh := m.comparator.Hash1(key) - for i, e := range m.store[kh] { - if m.comparator.Equals2(e.key, key) { - m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) - m.len-- - return - } - } -} - -func (m *JMap[K, V, C]) Clear() { - m.store = make(map[int][]*entry[K, V]) -} +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +import ( + "sort" +) + +// Collectable is an interface that a struct should implement if it is to be +// usable as a key in these collections. +type Collectable[T any] interface { + Hash() int + Equals(other Collectable[T]) bool +} + +type Comparator[T any] interface { + Hash1(o T) int + Equals2(T, T) bool +} + +// JStore implements a container that allows the use of a struct to calculate the key +// for a collection of values akin to map. This is not meant to be a full-blown HashMap but just +// serve the needs of the ANTLR Go runtime. +// +// For ease of porting the logic of the runtime from the master target (Java), this collection +// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() +// function as the key. The values are stored in a standard go map which internally is a form of hashmap +// itself, the key for the go map is the hash supplied by the key object. 
The collection is able to deal with +// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't +// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and +// we understand the requirements, then this is fine - this is not a general purpose collection. +type JStore[T any, C Comparator[T]] struct { + store map[int][]T + len int + comparator Comparator[T] +} + +func NewJStore[T any, C Comparator[T]](comparator Comparator[T]) *JStore[T, C] { + + if comparator == nil { + panic("comparator cannot be nil") + } + + s := &JStore[T, C]{ + store: make(map[int][]T, 1), + comparator: comparator, + } + return s +} + +// Put will store given value in the collection. Note that the key for storage is generated from +// the value itself - this is specifically because that is what ANTLR needs - this would not be useful +// as any kind of general collection. +// +// If the key has a hash conflict, then the value will be added to the slice of values associated with the +// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is +// tested by calling the equals() method on the key. +// +// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true +// +// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. +func (s *JStore[T, C]) Put(value T) (v T, exists bool) { //nolint:ireturn + + kh := s.comparator.Hash1(value) + + for _, v1 := range s.store[kh] { + if s.comparator.Equals2(value, v1) { + return v1, true + } + } + s.store[kh] = append(s.store[kh], value) + s.len++ + return value, false +} + +// Get will return the value associated with the key - the type of the key is the same type as the value +// which would not generally be useful, but this is a specific thing for ANTLR where the key is +// generated using the object we are going to store. +func (s *JStore[T, C]) Get(key T) (T, bool) { //nolint:ireturn + + kh := s.comparator.Hash1(key) + + for _, v := range s.store[kh] { + if s.comparator.Equals2(key, v) { + return v, true + } + } + return key, false +} + +// Contains returns true if the given key is present in the store +func (s *JStore[T, C]) Contains(key T) bool { //nolint:ireturn + + _, present := s.Get(key) + return present +} + +func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { + vs := make([]T, 0, len(s.store)) + for _, v := range s.store { + vs = append(vs, v...) 
+ } + sort.Slice(vs, func(i, j int) bool { + return less(vs[i], vs[j]) + }) + + return vs +} + +func (s *JStore[T, C]) Each(f func(T) bool) { + for _, e := range s.store { + for _, v := range e { + f(v) + } + } +} + +func (s *JStore[T, C]) Len() int { + return s.len +} + +func (s *JStore[T, C]) Values() []T { + vs := make([]T, 0, len(s.store)) + for _, e := range s.store { + for _, v := range e { + vs = append(vs, v) + } + } + return vs +} + +type entry[K, V any] struct { + key K + val V +} + +type JMap[K, V any, C Comparator[K]] struct { + store map[int][]*entry[K, V] + len int + comparator Comparator[K] +} + +func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K]) *JMap[K, V, C] { + return &JMap[K, V, C]{ + store: make(map[int][]*entry[K, V], 1), + comparator: comparator, + } +} + +func (m *JMap[K, V, C]) Put(key K, val V) { + kh := m.comparator.Hash1(key) + + m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) + m.len++ +} + +func (m *JMap[K, V, C]) Values() []V { + vs := make([]V, 0, len(m.store)) + for _, e := range m.store { + for _, v := range e { + vs = append(vs, v.val) + } + } + return vs +} + +func (m *JMap[K, V, C]) Get(key K) (V, bool) { + + var none V + kh := m.comparator.Hash1(key) + for _, e := range m.store[kh] { + if m.comparator.Equals2(e.key, key) { + return e.val, true + } + } + return none, false +} + +func (m *JMap[K, V, C]) Len() int { + return len(m.store) +} + +func (m *JMap[K, V, C]) Delete(key K) { + kh := m.comparator.Hash1(key) + for i, e := range m.store[kh] { + if m.comparator.Equals2(e.key, key) { + m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) + m.len-- + return + } + } +} + +func (m *JMap[K, V, C]) Clear() { + m.store = make(map[int][]*entry[K, V]) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go index 6533f05164..c66bfd0b9a 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer.go @@ -1,416 +1,416 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// A lexer is recognizer that draws input symbols from a character stream. -// lexer grammars result in a subclass of this object. A Lexer object -// uses simplified Match() and error recovery mechanisms in the interest -// of speed. -/// - -type Lexer interface { - TokenSource - Recognizer - - Emit() Token - - SetChannel(int) - PushMode(int) - PopMode() int - SetType(int) - SetMode(int) -} - -type BaseLexer struct { - *BaseRecognizer - - Interpreter ILexerATNSimulator - TokenStartCharIndex int - TokenStartLine int - TokenStartColumn int - ActionType int - Virt Lexer // The most derived lexer implementation. Allows virtual method calls. 
- - input CharStream - factory TokenFactory - tokenFactorySourcePair *TokenSourceCharStreamPair - token Token - hitEOF bool - channel int - thetype int - modeStack IntStack - mode int - text string -} - -func NewBaseLexer(input CharStream) *BaseLexer { - - lexer := new(BaseLexer) - - lexer.BaseRecognizer = NewBaseRecognizer() - - lexer.input = input - lexer.factory = CommonTokenFactoryDEFAULT - lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input} - - lexer.Virt = lexer - - lexer.Interpreter = nil // child classes must populate it - - // The goal of all lexer rules/methods is to create a token object. - // l is an instance variable as multiple rules may collaborate to - // create a single token. NextToken will return l object after - // Matching lexer rule(s). If you subclass to allow multiple token - // emissions, then set l to the last token to be Matched or - // something nonnil so that the auto token emit mechanism will not - // emit another token. - lexer.token = nil - - // What character index in the stream did the current token start at? - // Needed, for example, to get the text for current token. Set at - // the start of NextToken. - lexer.TokenStartCharIndex = -1 - - // The line on which the first character of the token resides/// - lexer.TokenStartLine = -1 - - // The character position of first character within the line/// - lexer.TokenStartColumn = -1 - - // Once we see EOF on char stream, next token will be EOF. - // If you have DONE : EOF then you see DONE EOF. - lexer.hitEOF = false - - // The channel number for the current token/// - lexer.channel = TokenDefaultChannel - - // The token type for the current token/// - lexer.thetype = TokenInvalidType - - lexer.modeStack = make([]int, 0) - lexer.mode = LexerDefaultMode - - // You can set the text for the current token to override what is in - // the input char buffer. Use setText() or can set l instance var. - // / - lexer.text = "" - - return lexer -} - -const ( - LexerDefaultMode = 0 - LexerMore = -2 - LexerSkip = -3 -) - -const ( - LexerDefaultTokenChannel = TokenDefaultChannel - LexerHidden = TokenHiddenChannel - LexerMinCharValue = 0x0000 - LexerMaxCharValue = 0x10FFFF -) - -func (b *BaseLexer) reset() { - // wack Lexer state variables - if b.input != nil { - b.input.Seek(0) // rewind the input - } - b.token = nil - b.thetype = TokenInvalidType - b.channel = TokenDefaultChannel - b.TokenStartCharIndex = -1 - b.TokenStartColumn = -1 - b.TokenStartLine = -1 - b.text = "" - - b.hitEOF = false - b.mode = LexerDefaultMode - b.modeStack = make([]int, 0) - - b.Interpreter.reset() -} - -func (b *BaseLexer) GetInterpreter() ILexerATNSimulator { - return b.Interpreter -} - -func (b *BaseLexer) GetInputStream() CharStream { - return b.input -} - -func (b *BaseLexer) GetSourceName() string { - return b.GrammarFileName -} - -func (b *BaseLexer) SetChannel(v int) { - b.channel = v -} - -func (b *BaseLexer) GetTokenFactory() TokenFactory { - return b.factory -} - -func (b *BaseLexer) setTokenFactory(f TokenFactory) { - b.factory = f -} - -func (b *BaseLexer) safeMatch() (ret int) { - defer func() { - if e := recover(); e != nil { - if re, ok := e.(RecognitionException); ok { - b.notifyListeners(re) // Report error - b.Recover(re) - ret = LexerSkip // default - } - } - }() - - return b.Interpreter.Match(b.input, b.mode) -} - -// Return a token from l source i.e., Match a token on the char stream. 
-func (b *BaseLexer) NextToken() Token { - if b.input == nil { - panic("NextToken requires a non-nil input stream.") - } - - tokenStartMarker := b.input.Mark() - - // previously in finally block - defer func() { - // make sure we release marker after Match or - // unbuffered char stream will keep buffering - b.input.Release(tokenStartMarker) - }() - - for { - if b.hitEOF { - b.EmitEOF() - return b.token - } - b.token = nil - b.channel = TokenDefaultChannel - b.TokenStartCharIndex = b.input.Index() - b.TokenStartColumn = b.Interpreter.GetCharPositionInLine() - b.TokenStartLine = b.Interpreter.GetLine() - b.text = "" - continueOuter := false - for { - b.thetype = TokenInvalidType - ttype := LexerSkip - - ttype = b.safeMatch() - - if b.input.LA(1) == TokenEOF { - b.hitEOF = true - } - if b.thetype == TokenInvalidType { - b.thetype = ttype - } - if b.thetype == LexerSkip { - continueOuter = true - break - } - if b.thetype != LexerMore { - break - } - } - - if continueOuter { - continue - } - if b.token == nil { - b.Virt.Emit() - } - return b.token - } -} - -// Instruct the lexer to Skip creating a token for current lexer rule -// and look for another token. NextToken() knows to keep looking when -// a lexer rule finishes with token set to SKIPTOKEN. Recall that -// if token==nil at end of any token rule, it creates one for you -// and emits it. -// / -func (b *BaseLexer) Skip() { - b.thetype = LexerSkip -} - -func (b *BaseLexer) More() { - b.thetype = LexerMore -} - -func (b *BaseLexer) SetMode(m int) { - b.mode = m -} - -func (b *BaseLexer) PushMode(m int) { - if LexerATNSimulatorDebug { - fmt.Println("pushMode " + strconv.Itoa(m)) - } - b.modeStack.Push(b.mode) - b.mode = m -} - -func (b *BaseLexer) PopMode() int { - if len(b.modeStack) == 0 { - panic("Empty Stack") - } - if LexerATNSimulatorDebug { - fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) - } - i, _ := b.modeStack.Pop() - b.mode = i - return b.mode -} - -func (b *BaseLexer) inputStream() CharStream { - return b.input -} - -// SetInputStream resets the lexer input stream and associated lexer state. -func (b *BaseLexer) SetInputStream(input CharStream) { - b.input = nil - b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} - b.reset() - b.input = input - b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} -} - -func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { - return b.tokenFactorySourcePair -} - -// By default does not support multiple emits per NextToken invocation -// for efficiency reasons. Subclass and override l method, NextToken, -// and GetToken (to push tokens into a list and pull from that list -// rather than a single variable as l implementation does). -// / -func (b *BaseLexer) EmitToken(token Token) { - b.token = token -} - -// The standard method called to automatically emit a token at the -// outermost lexical rule. The token object should point into the -// char buffer start..stop. If there is a text override in 'text', -// use that to set the token's text. Override l method to emit -// custom Token objects or provide a Newfactory. 
-// / -func (b *BaseLexer) Emit() Token { - t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) - b.EmitToken(t) - return t -} - -func (b *BaseLexer) EmitEOF() Token { - cpos := b.GetCharPositionInLine() - lpos := b.GetLine() - eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos) - b.EmitToken(eof) - return eof -} - -func (b *BaseLexer) GetCharPositionInLine() int { - return b.Interpreter.GetCharPositionInLine() -} - -func (b *BaseLexer) GetLine() int { - return b.Interpreter.GetLine() -} - -func (b *BaseLexer) GetType() int { - return b.thetype -} - -func (b *BaseLexer) SetType(t int) { - b.thetype = t -} - -// What is the index of the current character of lookahead?/// -func (b *BaseLexer) GetCharIndex() int { - return b.input.Index() -} - -// Return the text Matched so far for the current token or any text override. -// Set the complete text of l token it wipes any previous changes to the text. -func (b *BaseLexer) GetText() string { - if b.text != "" { - return b.text - } - - return b.Interpreter.GetText(b.input) -} - -func (b *BaseLexer) SetText(text string) { - b.text = text -} - -func (b *BaseLexer) GetATN() *ATN { - return b.Interpreter.ATN() -} - -// Return a list of all Token objects in input char stream. -// Forces load of all tokens. Does not include EOF token. -// / -func (b *BaseLexer) GetAllTokens() []Token { - vl := b.Virt - tokens := make([]Token, 0) - t := vl.NextToken() - for t.GetTokenType() != TokenEOF { - tokens = append(tokens, t) - t = vl.NextToken() - } - return tokens -} - -func (b *BaseLexer) notifyListeners(e RecognitionException) { - start := b.TokenStartCharIndex - stop := b.input.Index() - text := b.input.GetTextFromInterval(NewInterval(start, stop)) - msg := "token recognition error at: '" + text + "'" - listener := b.GetErrorListenerDispatch() - listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e) -} - -func (b *BaseLexer) getErrorDisplayForChar(c rune) string { - if c == TokenEOF { - return "" - } else if c == '\n' { - return "\\n" - } else if c == '\t' { - return "\\t" - } else if c == '\r' { - return "\\r" - } else { - return string(c) - } -} - -func (b *BaseLexer) getCharErrorDisplay(c rune) string { - return "'" + b.getErrorDisplayForChar(c) + "'" -} - -// Lexers can normally Match any char in it's vocabulary after Matching -// a token, so do the easy thing and just kill a character and hope -// it all works out. You can instead use the rule invocation stack -// to do sophisticated error recovery if you are in a fragment rule. -// / -func (b *BaseLexer) Recover(re RecognitionException) { - if b.input.LA(1) != TokenEOF { - if _, ok := re.(*LexerNoViableAltException); ok { - // Skip a char and try again - b.Interpreter.Consume(b.input) - } else { - // TODO: Do we lose character or line position information? - b.input.Consume() - } - } -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// A lexer is recognizer that draws input symbols from a character stream. +// lexer grammars result in a subclass of this object. A Lexer object +// uses simplified Match() and error recovery mechanisms in the interest +// of speed. 
+/// + +type Lexer interface { + TokenSource + Recognizer + + Emit() Token + + SetChannel(int) + PushMode(int) + PopMode() int + SetType(int) + SetMode(int) +} + +type BaseLexer struct { + *BaseRecognizer + + Interpreter ILexerATNSimulator + TokenStartCharIndex int + TokenStartLine int + TokenStartColumn int + ActionType int + Virt Lexer // The most derived lexer implementation. Allows virtual method calls. + + input CharStream + factory TokenFactory + tokenFactorySourcePair *TokenSourceCharStreamPair + token Token + hitEOF bool + channel int + thetype int + modeStack IntStack + mode int + text string +} + +func NewBaseLexer(input CharStream) *BaseLexer { + + lexer := new(BaseLexer) + + lexer.BaseRecognizer = NewBaseRecognizer() + + lexer.input = input + lexer.factory = CommonTokenFactoryDEFAULT + lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input} + + lexer.Virt = lexer + + lexer.Interpreter = nil // child classes must populate it + + // The goal of all lexer rules/methods is to create a token object. + // l is an instance variable as multiple rules may collaborate to + // create a single token. NextToken will return l object after + // Matching lexer rule(s). If you subclass to allow multiple token + // emissions, then set l to the last token to be Matched or + // something nonnil so that the auto token emit mechanism will not + // emit another token. + lexer.token = nil + + // What character index in the stream did the current token start at? + // Needed, for example, to get the text for current token. Set at + // the start of NextToken. + lexer.TokenStartCharIndex = -1 + + // The line on which the first character of the token resides/// + lexer.TokenStartLine = -1 + + // The character position of first character within the line/// + lexer.TokenStartColumn = -1 + + // Once we see EOF on char stream, next token will be EOF. + // If you have DONE : EOF then you see DONE EOF. + lexer.hitEOF = false + + // The channel number for the current token/// + lexer.channel = TokenDefaultChannel + + // The token type for the current token/// + lexer.thetype = TokenInvalidType + + lexer.modeStack = make([]int, 0) + lexer.mode = LexerDefaultMode + + // You can set the text for the current token to override what is in + // the input char buffer. Use setText() or can set l instance var. 
+ // / + lexer.text = "" + + return lexer +} + +const ( + LexerDefaultMode = 0 + LexerMore = -2 + LexerSkip = -3 +) + +const ( + LexerDefaultTokenChannel = TokenDefaultChannel + LexerHidden = TokenHiddenChannel + LexerMinCharValue = 0x0000 + LexerMaxCharValue = 0x10FFFF +) + +func (b *BaseLexer) reset() { + // wack Lexer state variables + if b.input != nil { + b.input.Seek(0) // rewind the input + } + b.token = nil + b.thetype = TokenInvalidType + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = -1 + b.TokenStartColumn = -1 + b.TokenStartLine = -1 + b.text = "" + + b.hitEOF = false + b.mode = LexerDefaultMode + b.modeStack = make([]int, 0) + + b.Interpreter.reset() +} + +func (b *BaseLexer) GetInterpreter() ILexerATNSimulator { + return b.Interpreter +} + +func (b *BaseLexer) GetInputStream() CharStream { + return b.input +} + +func (b *BaseLexer) GetSourceName() string { + return b.GrammarFileName +} + +func (b *BaseLexer) SetChannel(v int) { + b.channel = v +} + +func (b *BaseLexer) GetTokenFactory() TokenFactory { + return b.factory +} + +func (b *BaseLexer) setTokenFactory(f TokenFactory) { + b.factory = f +} + +func (b *BaseLexer) safeMatch() (ret int) { + defer func() { + if e := recover(); e != nil { + if re, ok := e.(RecognitionException); ok { + b.notifyListeners(re) // Report error + b.Recover(re) + ret = LexerSkip // default + } + } + }() + + return b.Interpreter.Match(b.input, b.mode) +} + +// Return a token from l source i.e., Match a token on the char stream. +func (b *BaseLexer) NextToken() Token { + if b.input == nil { + panic("NextToken requires a non-nil input stream.") + } + + tokenStartMarker := b.input.Mark() + + // previously in finally block + defer func() { + // make sure we release marker after Match or + // unbuffered char stream will keep buffering + b.input.Release(tokenStartMarker) + }() + + for { + if b.hitEOF { + b.EmitEOF() + return b.token + } + b.token = nil + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = b.input.Index() + b.TokenStartColumn = b.Interpreter.GetCharPositionInLine() + b.TokenStartLine = b.Interpreter.GetLine() + b.text = "" + continueOuter := false + for { + b.thetype = TokenInvalidType + ttype := LexerSkip + + ttype = b.safeMatch() + + if b.input.LA(1) == TokenEOF { + b.hitEOF = true + } + if b.thetype == TokenInvalidType { + b.thetype = ttype + } + if b.thetype == LexerSkip { + continueOuter = true + break + } + if b.thetype != LexerMore { + break + } + } + + if continueOuter { + continue + } + if b.token == nil { + b.Virt.Emit() + } + return b.token + } +} + +// Instruct the lexer to Skip creating a token for current lexer rule +// and look for another token. NextToken() knows to keep looking when +// a lexer rule finishes with token set to SKIPTOKEN. Recall that +// if token==nil at end of any token rule, it creates one for you +// and emits it. 
+// / +func (b *BaseLexer) Skip() { + b.thetype = LexerSkip +} + +func (b *BaseLexer) More() { + b.thetype = LexerMore +} + +func (b *BaseLexer) SetMode(m int) { + b.mode = m +} + +func (b *BaseLexer) PushMode(m int) { + if LexerATNSimulatorDebug { + fmt.Println("pushMode " + strconv.Itoa(m)) + } + b.modeStack.Push(b.mode) + b.mode = m +} + +func (b *BaseLexer) PopMode() int { + if len(b.modeStack) == 0 { + panic("Empty Stack") + } + if LexerATNSimulatorDebug { + fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) + } + i, _ := b.modeStack.Pop() + b.mode = i + return b.mode +} + +func (b *BaseLexer) inputStream() CharStream { + return b.input +} + +// SetInputStream resets the lexer input stream and associated lexer state. +func (b *BaseLexer) SetInputStream(input CharStream) { + b.input = nil + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} + b.reset() + b.input = input + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} +} + +func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { + return b.tokenFactorySourcePair +} + +// By default does not support multiple emits per NextToken invocation +// for efficiency reasons. Subclass and override l method, NextToken, +// and GetToken (to push tokens into a list and pull from that list +// rather than a single variable as l implementation does). +// / +func (b *BaseLexer) EmitToken(token Token) { + b.token = token +} + +// The standard method called to automatically emit a token at the +// outermost lexical rule. The token object should point into the +// char buffer start..stop. If there is a text override in 'text', +// use that to set the token's text. Override l method to emit +// custom Token objects or provide a Newfactory. +// / +func (b *BaseLexer) Emit() Token { + t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) + b.EmitToken(t) + return t +} + +func (b *BaseLexer) EmitEOF() Token { + cpos := b.GetCharPositionInLine() + lpos := b.GetLine() + eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos) + b.EmitToken(eof) + return eof +} + +func (b *BaseLexer) GetCharPositionInLine() int { + return b.Interpreter.GetCharPositionInLine() +} + +func (b *BaseLexer) GetLine() int { + return b.Interpreter.GetLine() +} + +func (b *BaseLexer) GetType() int { + return b.thetype +} + +func (b *BaseLexer) SetType(t int) { + b.thetype = t +} + +// What is the index of the current character of lookahead?/// +func (b *BaseLexer) GetCharIndex() int { + return b.input.Index() +} + +// Return the text Matched so far for the current token or any text override. +// Set the complete text of l token it wipes any previous changes to the text. +func (b *BaseLexer) GetText() string { + if b.text != "" { + return b.text + } + + return b.Interpreter.GetText(b.input) +} + +func (b *BaseLexer) SetText(text string) { + b.text = text +} + +func (b *BaseLexer) GetATN() *ATN { + return b.Interpreter.ATN() +} + +// Return a list of all Token objects in input char stream. +// Forces load of all tokens. Does not include EOF token. 
+// / +func (b *BaseLexer) GetAllTokens() []Token { + vl := b.Virt + tokens := make([]Token, 0) + t := vl.NextToken() + for t.GetTokenType() != TokenEOF { + tokens = append(tokens, t) + t = vl.NextToken() + } + return tokens +} + +func (b *BaseLexer) notifyListeners(e RecognitionException) { + start := b.TokenStartCharIndex + stop := b.input.Index() + text := b.input.GetTextFromInterval(NewInterval(start, stop)) + msg := "token recognition error at: '" + text + "'" + listener := b.GetErrorListenerDispatch() + listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e) +} + +func (b *BaseLexer) getErrorDisplayForChar(c rune) string { + if c == TokenEOF { + return "" + } else if c == '\n' { + return "\\n" + } else if c == '\t' { + return "\\t" + } else if c == '\r' { + return "\\r" + } else { + return string(c) + } +} + +func (b *BaseLexer) getCharErrorDisplay(c rune) string { + return "'" + b.getErrorDisplayForChar(c) + "'" +} + +// Lexers can normally Match any char in it's vocabulary after Matching +// a token, so do the easy thing and just kill a character and hope +// it all works out. You can instead use the rule invocation stack +// to do sophisticated error recovery if you are in a fragment rule. +// / +func (b *BaseLexer) Recover(re RecognitionException) { + if b.input.LA(1) != TokenEOF { + if _, ok := re.(*LexerNoViableAltException); ok { + // Skip a char and try again + b.Interpreter.Consume(b.input) + } else { + // TODO: Do we lose character or line position information? + b.input.Consume() + } + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go index 111656c295..fd71eeebfe 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action.go @@ -1,432 +1,432 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "strconv" - -const ( - LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action. - LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action. - LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action. - LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action. - LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action. - LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action. - LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action. - LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action. 
-) - -type LexerAction interface { - getActionType() int - getIsPositionDependent() bool - execute(lexer Lexer) - Hash() int - Equals(other LexerAction) bool -} - -type BaseLexerAction struct { - actionType int - isPositionDependent bool -} - -func NewBaseLexerAction(action int) *BaseLexerAction { - la := new(BaseLexerAction) - - la.actionType = action - la.isPositionDependent = false - - return la -} - -func (b *BaseLexerAction) execute(lexer Lexer) { - panic("Not implemented") -} - -func (b *BaseLexerAction) getActionType() int { - return b.actionType -} - -func (b *BaseLexerAction) getIsPositionDependent() bool { - return b.isPositionDependent -} - -func (b *BaseLexerAction) Hash() int { - return b.actionType -} - -func (b *BaseLexerAction) Equals(other LexerAction) bool { - return b == other -} - -// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. -// -//

The {@code Skip} command does not have any parameters, so this action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.
-type LexerSkipAction struct { - *BaseLexerAction -} - -func NewLexerSkipAction() *LexerSkipAction { - la := new(LexerSkipAction) - la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip) - return la -} - -// Provides a singleton instance of l parameterless lexer action. -var LexerSkipActionINSTANCE = NewLexerSkipAction() - -func (l *LexerSkipAction) execute(lexer Lexer) { - lexer.Skip() -} - -func (l *LexerSkipAction) String() string { - return "skip" -} - -// Implements the {@code type} lexer action by calling {@link Lexer//setType} -// -// with the assigned type. -type LexerTypeAction struct { - *BaseLexerAction - - thetype int -} - -func NewLexerTypeAction(thetype int) *LexerTypeAction { - l := new(LexerTypeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType) - l.thetype = thetype - return l -} - -func (l *LexerTypeAction) execute(lexer Lexer) { - lexer.SetType(l.thetype) -} - -func (l *LexerTypeAction) Hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.thetype) - return murmurFinish(h, 2) -} - -func (l *LexerTypeAction) Equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerTypeAction); !ok { - return false - } else { - return l.thetype == other.(*LexerTypeAction).thetype - } -} - -func (l *LexerTypeAction) String() string { - return "actionType(" + strconv.Itoa(l.thetype) + ")" -} - -// Implements the {@code pushMode} lexer action by calling -// {@link Lexer//pushMode} with the assigned mode. -type LexerPushModeAction struct { - *BaseLexerAction - - mode int -} - -func NewLexerPushModeAction(mode int) *LexerPushModeAction { - - l := new(LexerPushModeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode) - - l.mode = mode - return l -} - -//

This action is implemented by calling {@link Lexer//pushMode} with the -// value provided by {@link //getMode}.
-func (l *LexerPushModeAction) execute(lexer Lexer) { - lexer.PushMode(l.mode) -} - -func (l *LexerPushModeAction) Hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.mode) - return murmurFinish(h, 2) -} - -func (l *LexerPushModeAction) Equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerPushModeAction); !ok { - return false - } else { - return l.mode == other.(*LexerPushModeAction).mode - } -} - -func (l *LexerPushModeAction) String() string { - return "pushMode(" + strconv.Itoa(l.mode) + ")" -} - -// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. -// -//

The {@code popMode} command does not have any parameters, so this action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.
-type LexerPopModeAction struct { - *BaseLexerAction -} - -func NewLexerPopModeAction() *LexerPopModeAction { - - l := new(LexerPopModeAction) - - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode) - - return l -} - -var LexerPopModeActionINSTANCE = NewLexerPopModeAction() - -//

This action is implemented by calling {@link Lexer//popMode}.
-func (l *LexerPopModeAction) execute(lexer Lexer) { - lexer.PopMode() -} - -func (l *LexerPopModeAction) String() string { - return "popMode" -} - -// Implements the {@code more} lexer action by calling {@link Lexer//more}. -// -//

The {@code more} command does not have any parameters, so this action is -// implemented as a singleton instance exposed by {@link //INSTANCE}.
- -type LexerMoreAction struct { - *BaseLexerAction -} - -func NewLexerMoreAction() *LexerMoreAction { - l := new(LexerMoreAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore) - - return l -} - -var LexerMoreActionINSTANCE = NewLexerMoreAction() - -//

This action is implemented by calling {@link Lexer//More}.
-func (l *LexerMoreAction) execute(lexer Lexer) { - lexer.More() -} - -func (l *LexerMoreAction) String() string { - return "more" -} - -// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with -// the assigned mode. -type LexerModeAction struct { - *BaseLexerAction - - mode int -} - -func NewLexerModeAction(mode int) *LexerModeAction { - l := new(LexerModeAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode) - l.mode = mode - return l -} - -//

This action is implemented by calling {@link Lexer//mode} with the -// value provided by {@link //getMode}.
-func (l *LexerModeAction) execute(lexer Lexer) { - lexer.SetMode(l.mode) -} - -func (l *LexerModeAction) Hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.mode) - return murmurFinish(h, 2) -} - -func (l *LexerModeAction) Equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerModeAction); !ok { - return false - } else { - return l.mode == other.(*LexerModeAction).mode - } -} - -func (l *LexerModeAction) String() string { - return "mode(" + strconv.Itoa(l.mode) + ")" -} - -// Executes a custom lexer action by calling {@link Recognizer//action} with the -// rule and action indexes assigned to the custom action. The implementation of -// a custom action is added to the generated code for the lexer in an override -// of {@link Recognizer//action} when the grammar is compiled. -// -//

This class may represent embedded actions created with the {...} -// syntax in ANTLR 4, as well as actions created for lexer commands where the -// command argument could not be evaluated when the grammar was compiled.
- -// Constructs a custom lexer action with the specified rule and action -// indexes. -// -// @param ruleIndex The rule index to use for calls to -// {@link Recognizer//action}. -// @param actionIndex The action index to use for calls to -// {@link Recognizer//action}. - -type LexerCustomAction struct { - *BaseLexerAction - ruleIndex, actionIndex int -} - -func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction { - l := new(LexerCustomAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom) - l.ruleIndex = ruleIndex - l.actionIndex = actionIndex - l.isPositionDependent = true - return l -} - -//

Custom actions are implemented by calling {@link Lexer//action} with the -// appropriate rule and action indexes.
-func (l *LexerCustomAction) execute(lexer Lexer) { - lexer.Action(nil, l.ruleIndex, l.actionIndex) -} - -func (l *LexerCustomAction) Hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.ruleIndex) - h = murmurUpdate(h, l.actionIndex) - return murmurFinish(h, 3) -} - -func (l *LexerCustomAction) Equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerCustomAction); !ok { - return false - } else { - return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && - l.actionIndex == other.(*LexerCustomAction).actionIndex - } -} - -// Implements the {@code channel} lexer action by calling -// {@link Lexer//setChannel} with the assigned channel. -// Constructs a New{@code channel} action with the specified channel value. -// @param channel The channel value to pass to {@link Lexer//setChannel}. -type LexerChannelAction struct { - *BaseLexerAction - - channel int -} - -func NewLexerChannelAction(channel int) *LexerChannelAction { - l := new(LexerChannelAction) - l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) - l.channel = channel - return l -} - -//

This action is implemented by calling {@link Lexer//setChannel} with the -// value provided by {@link //getChannel}.
-func (l *LexerChannelAction) execute(lexer Lexer) { - lexer.SetChannel(l.channel) -} - -func (l *LexerChannelAction) Hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.actionType) - h = murmurUpdate(h, l.channel) - return murmurFinish(h, 2) -} - -func (l *LexerChannelAction) Equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerChannelAction); !ok { - return false - } else { - return l.channel == other.(*LexerChannelAction).channel - } -} - -func (l *LexerChannelAction) String() string { - return "channel(" + strconv.Itoa(l.channel) + ")" -} - -// This implementation of {@link LexerAction} is used for tracking input offsets -// for position-dependent actions within a {@link LexerActionExecutor}. -// -//

This action is not serialized as part of the ATN, and is only required for -// position-dependent lexer actions which appear at a location other than the -// end of a rule. For more information about DFA optimizations employed for -// lexer actions, see {@link LexerActionExecutor//append} and -// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
- -// Constructs a Newindexed custom action by associating a character offset -// with a {@link LexerAction}. -// -//

Note: This class is only required for lexer actions for which -// {@link LexerAction//isPositionDependent} returns {@code true}.
-// -// @param offset The offset into the input {@link CharStream}, relative to -// the token start index, at which the specified lexer action should be -// executed. -// @param action The lexer action to execute at a particular offset in the -// input {@link CharStream}. -type LexerIndexedCustomAction struct { - *BaseLexerAction - - offset int - lexerAction LexerAction - isPositionDependent bool -} - -func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { - - l := new(LexerIndexedCustomAction) - l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType()) - - l.offset = offset - l.lexerAction = lexerAction - l.isPositionDependent = true - - return l -} - -//

This method calls {@link //execute} on the result of {@link //getAction} -// using the provided {@code lexer}.
-func (l *LexerIndexedCustomAction) execute(lexer Lexer) { - // assume the input stream position was properly set by the calling code - l.lexerAction.execute(lexer) -} - -func (l *LexerIndexedCustomAction) Hash() int { - h := murmurInit(0) - h = murmurUpdate(h, l.offset) - h = murmurUpdate(h, l.lexerAction.Hash()) - return murmurFinish(h, 2) -} - -func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { - if l == other { - return true - } else if _, ok := other.(*LexerIndexedCustomAction); !ok { - return false - } else { - return l.offset == other.(*LexerIndexedCustomAction).offset && - l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction) - } -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "strconv" + +const ( + LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action. + LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action. + LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action. + LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action. + LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action. + LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action. + LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action. + LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action. +) + +type LexerAction interface { + getActionType() int + getIsPositionDependent() bool + execute(lexer Lexer) + Hash() int + Equals(other LexerAction) bool +} + +type BaseLexerAction struct { + actionType int + isPositionDependent bool +} + +func NewBaseLexerAction(action int) *BaseLexerAction { + la := new(BaseLexerAction) + + la.actionType = action + la.isPositionDependent = false + + return la +} + +func (b *BaseLexerAction) execute(lexer Lexer) { + panic("Not implemented") +} + +func (b *BaseLexerAction) getActionType() int { + return b.actionType +} + +func (b *BaseLexerAction) getIsPositionDependent() bool { + return b.isPositionDependent +} + +func (b *BaseLexerAction) Hash() int { + return b.actionType +} + +func (b *BaseLexerAction) Equals(other LexerAction) bool { + return b == other +} + +// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. +// +//

The {@code Skip} command does not have any parameters, so this action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.
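Since parameterless commands like skip carry no state, one shared instance can serve every occurrence. Below is a minimal standalone sketch of that singleton pattern; the names are illustrative stand-ins, not part of the runtime:

```go
package main

import "fmt"

// skipAction stands in for a parameterless lexer action such as "skip".
type skipAction struct{}

// One shared instance serves every use of the action, so equality
// checks can rely on plain pointer identity.
var skipINSTANCE = &skipAction{}

func main() {
	a, b := skipINSTANCE, skipINSTANCE
	fmt.Println(a == b) // true: the same instance everywhere
}
```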
+type LexerSkipAction struct { + *BaseLexerAction +} + +func NewLexerSkipAction() *LexerSkipAction { + la := new(LexerSkipAction) + la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip) + return la +} + +// Provides a singleton instance of l parameterless lexer action. +var LexerSkipActionINSTANCE = NewLexerSkipAction() + +func (l *LexerSkipAction) execute(lexer Lexer) { + lexer.Skip() +} + +func (l *LexerSkipAction) String() string { + return "skip" +} + +// Implements the {@code type} lexer action by calling {@link Lexer//setType} +// +// with the assigned type. +type LexerTypeAction struct { + *BaseLexerAction + + thetype int +} + +func NewLexerTypeAction(thetype int) *LexerTypeAction { + l := new(LexerTypeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType) + l.thetype = thetype + return l +} + +func (l *LexerTypeAction) execute(lexer Lexer) { + lexer.SetType(l.thetype) +} + +func (l *LexerTypeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.thetype) + return murmurFinish(h, 2) +} + +func (l *LexerTypeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerTypeAction); !ok { + return false + } else { + return l.thetype == other.(*LexerTypeAction).thetype + } +} + +func (l *LexerTypeAction) String() string { + return "actionType(" + strconv.Itoa(l.thetype) + ")" +} + +// Implements the {@code pushMode} lexer action by calling +// {@link Lexer//pushMode} with the assigned mode. +type LexerPushModeAction struct { + *BaseLexerAction + + mode int +} + +func NewLexerPushModeAction(mode int) *LexerPushModeAction { + + l := new(LexerPushModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode) + + l.mode = mode + return l +} + +//

This action is implemented by calling {@link Lexer//pushMode} with the +// value provided by {@link //getMode}.
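The pushMode/popMode pair drives a simple mode stack. The sketch below illustrates the stack discipline these two actions rely on, using a stand-in type rather than the real lexer:

```go
package main

import "fmt"

// lexerModes holds just the mode state the two actions manipulate.
type lexerModes struct {
	mode  int
	stack []int
}

// PushMode saves the current mode, then switches to m.
func (l *lexerModes) PushMode(m int) {
	l.stack = append(l.stack, l.mode)
	l.mode = m
}

// PopMode restores the most recently saved mode; popping an empty
// stack panics, mirroring the runtime code earlier in this diff.
func (l *lexerModes) PopMode() int {
	if len(l.stack) == 0 {
		panic("Empty Stack")
	}
	l.mode = l.stack[len(l.stack)-1]
	l.stack = l.stack[:len(l.stack)-1]
	return l.mode
}

func main() {
	l := &lexerModes{mode: 0} // 0 plays the role of LexerDefaultMode
	l.PushMode(2)             // enter mode 2, remembering mode 0
	fmt.Println(l.PopMode())  // 0: back to the starting mode
}
```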
+func (l *LexerPushModeAction) execute(lexer Lexer) { + lexer.PushMode(l.mode) +} + +func (l *LexerPushModeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerPushModeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerPushModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerPushModeAction).mode + } +} + +func (l *LexerPushModeAction) String() string { + return "pushMode(" + strconv.Itoa(l.mode) + ")" +} + +// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. +// +//

The {@code popMode} command does not have any parameters, so this action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerPopModeAction struct { + *BaseLexerAction +} + +func NewLexerPopModeAction() *LexerPopModeAction { + + l := new(LexerPopModeAction) + + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode) + + return l +} + +var LexerPopModeActionINSTANCE = NewLexerPopModeAction() + +//

This action is implemented by calling {@link Lexer//popMode}.
+func (l *LexerPopModeAction) execute(lexer Lexer) { + lexer.PopMode() +} + +func (l *LexerPopModeAction) String() string { + return "popMode" +} + +// Implements the {@code more} lexer action by calling {@link Lexer//more}. +// +//

The {@code more} command does not have any parameters, so this action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.
+ +type LexerMoreAction struct { + *BaseLexerAction +} + +func NewLexerMoreAction() *LexerMoreAction { + l := new(LexerMoreAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore) + + return l +} + +var LexerMoreActionINSTANCE = NewLexerMoreAction() + +//

This action is implemented by calling {@link Lexer//More}.
+func (l *LexerMoreAction) execute(lexer Lexer) { + lexer.More() +} + +func (l *LexerMoreAction) String() string { + return "more" +} + +// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with +// the assigned mode. +type LexerModeAction struct { + *BaseLexerAction + + mode int +} + +func NewLexerModeAction(mode int) *LexerModeAction { + l := new(LexerModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode) + l.mode = mode + return l +} + +//

This action is implemented by calling {@link Lexer//mode} with the +// value provided by {@link //getMode}.
+func (l *LexerModeAction) execute(lexer Lexer) { + lexer.SetMode(l.mode) +} + +func (l *LexerModeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerModeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerModeAction).mode + } +} + +func (l *LexerModeAction) String() string { + return "mode(" + strconv.Itoa(l.mode) + ")" +} + +// Executes a custom lexer action by calling {@link Recognizer//action} with the +// rule and action indexes assigned to the custom action. The implementation of +// a custom action is added to the generated code for the lexer in an override +// of {@link Recognizer//action} when the grammar is compiled. +// +//

This class may represent embedded actions created with the {...} +// syntax in ANTLR 4, as well as actions created for lexer commands where the +// command argument could not be evaluated when the grammar was compiled.
+ +// Constructs a custom lexer action with the specified rule and action +// indexes. +// +// @param ruleIndex The rule index to use for calls to +// {@link Recognizer//action}. +// @param actionIndex The action index to use for calls to +// {@link Recognizer//action}. + +type LexerCustomAction struct { + *BaseLexerAction + ruleIndex, actionIndex int +} + +func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction { + l := new(LexerCustomAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom) + l.ruleIndex = ruleIndex + l.actionIndex = actionIndex + l.isPositionDependent = true + return l +} + +//

Custom actions are implemented by calling {@link Lexer//action} with the +// appropriate rule and action indexes.
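In a generated lexer, each embedded action body ends up behind a dispatch on the rule and action indexes. A hedged sketch of that dispatch shape follows; the stub type and the index pair are hypothetical, not taken from any generated lexer:

```go
package main

import "fmt"

// lexerStub stands in for a generated lexer; only channel matters here.
type lexerStub struct{ channel int }

// Action plays the role of the recognizer's action hook: it selects the
// embedded action body by (ruleIndex, actionIndex). The pair (2, 0) is
// invented for this example.
func (l *lexerStub) Action(ruleIndex, actionIndex int) {
	switch {
	case ruleIndex == 2 && actionIndex == 0:
		l.channel = 1 // e.g. divert the matched token to another channel
	}
}

func main() {
	l := &lexerStub{}
	l.Action(2, 0)
	fmt.Println(l.channel) // 1
}
```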
+func (l *LexerCustomAction) execute(lexer Lexer) { + lexer.Action(nil, l.ruleIndex, l.actionIndex) +} + +func (l *LexerCustomAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.ruleIndex) + h = murmurUpdate(h, l.actionIndex) + return murmurFinish(h, 3) +} + +func (l *LexerCustomAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerCustomAction); !ok { + return false + } else { + return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && + l.actionIndex == other.(*LexerCustomAction).actionIndex + } +} + +// Implements the {@code channel} lexer action by calling +// {@link Lexer//setChannel} with the assigned channel. +// Constructs a New{@code channel} action with the specified channel value. +// @param channel The channel value to pass to {@link Lexer//setChannel}. +type LexerChannelAction struct { + *BaseLexerAction + + channel int +} + +func NewLexerChannelAction(channel int) *LexerChannelAction { + l := new(LexerChannelAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) + l.channel = channel + return l +} + +//

This action is implemented by calling {@link Lexer//setChannel} with the +// value provided by {@link //getChannel}.
+func (l *LexerChannelAction) execute(lexer Lexer) { + lexer.SetChannel(l.channel) +} + +func (l *LexerChannelAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.channel) + return murmurFinish(h, 2) +} + +func (l *LexerChannelAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerChannelAction); !ok { + return false + } else { + return l.channel == other.(*LexerChannelAction).channel + } +} + +func (l *LexerChannelAction) String() string { + return "channel(" + strconv.Itoa(l.channel) + ")" +} + +// This implementation of {@link LexerAction} is used for tracking input offsets +// for position-dependent actions within a {@link LexerActionExecutor}. +// +//

This action is not serialized as part of the ATN, and is only required for +// position-dependent lexer actions which appear at a location other than the +// end of a rule. For more information about DFA optimizations employed for +// lexer actions, see {@link LexerActionExecutor//append} and +// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
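A small sketch of the rule this paragraph describes, with stand-in types: only position-dependent actions get an offset wrapper attached, everything else passes through unchanged:

```go
package main

import "fmt"

// action mirrors the single property this rule cares about.
type action interface{ positionDependent() bool }

type customAction struct{} // stands in for a custom, position-dependent action

func (customAction) positionDependent() bool { return true }

type skipLike struct{} // stands in for skip: never position-dependent

func (skipLike) positionDependent() bool { return false }

// indexedAction is the counterpart of LexerIndexedCustomAction: an
// input offset paired with the wrapped action.
type indexedAction struct {
	offset int
	inner  action
}

func (indexedAction) positionDependent() bool { return true }

// wrapIfNeeded attaches an offset only where the action depends on position.
func wrapIfNeeded(a action, offset int) action {
	if a.positionDependent() {
		return indexedAction{offset: offset, inner: a}
	}
	return a
}

func main() {
	fmt.Printf("%T\n", wrapIfNeeded(customAction{}, 3)) // main.indexedAction
	fmt.Printf("%T\n", wrapIfNeeded(skipLike{}, 3))     // main.skipLike
}
```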
+ +// Constructs a Newindexed custom action by associating a character offset +// with a {@link LexerAction}. +// +//

Note: This class is only required for lexer actions for which +// {@link LexerAction//isPositionDependent} returns {@code true}.
+// +// @param offset The offset into the input {@link CharStream}, relative to +// the token start index, at which the specified lexer action should be +// executed. +// @param action The lexer action to execute at a particular offset in the +// input {@link CharStream}. +type LexerIndexedCustomAction struct { + *BaseLexerAction + + offset int + lexerAction LexerAction + isPositionDependent bool +} + +func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { + + l := new(LexerIndexedCustomAction) + l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType()) + + l.offset = offset + l.lexerAction = lexerAction + l.isPositionDependent = true + + return l +} + +//

This method calls {@link //execute} on the result of {@link //getAction} +// using the provided {@code lexer}.
+func (l *LexerIndexedCustomAction) execute(lexer Lexer) { + // assume the input stream position was properly set by the calling code + l.lexerAction.execute(lexer) +} + +func (l *LexerIndexedCustomAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.offset) + h = murmurUpdate(h, l.lexerAction.Hash()) + return murmurFinish(h, 2) +} + +func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerIndexedCustomAction); !ok { + return false + } else { + return l.offset == other.(*LexerIndexedCustomAction).offset && + l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go index be1ba7a7e3..8666cda376 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_action_executor.go @@ -1,186 +1,186 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "golang.org/x/exp/slices" - -// Represents an executor for a sequence of lexer actions which traversed during -// the Matching operation of a lexer rule (token). -// -//
The executor tracks position information for position-dependent lexer actions -// efficiently, ensuring that actions appearing only at the end of the rule do -// not cause bloating of the {@link DFA} created for the lexer.
- -type LexerActionExecutor struct { - lexerActions []LexerAction - cachedHash int -} - -func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { - - if lexerActions == nil { - lexerActions = make([]LexerAction, 0) - } - - l := new(LexerActionExecutor) - - l.lexerActions = lexerActions - - // Caches the result of {@link //hashCode} since the hash code is an element - // of the performance-critical {@link LexerATNConfig//hashCode} operation. - l.cachedHash = murmurInit(57) - for _, a := range lexerActions { - l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) - } - - return l -} - -// Creates a {@link LexerActionExecutor} which executes the actions for -// the input {@code lexerActionExecutor} followed by a specified -// {@code lexerAction}. -// -// @param lexerActionExecutor The executor for actions already traversed by -// the lexer while Matching a token within a particular -// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as -// though it were an empty executor. -// @param lexerAction The lexer action to execute after the actions -// specified in {@code lexerActionExecutor}. -// -// @return A {@link LexerActionExecutor} for executing the combine actions -// of {@code lexerActionExecutor} and {@code lexerAction}. -func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { - if lexerActionExecutor == nil { - return NewLexerActionExecutor([]LexerAction{lexerAction}) - } - - return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) -} - -// Creates a {@link LexerActionExecutor} which encodes the current offset -// for position-dependent lexer actions. -// -//
Normally, when the executor encounters lexer actions where -// {@link LexerAction//isPositionDependent} returns {@code true}, it calls -// {@link IntStream//seek} on the input {@link CharStream} to set the input -// position to the end of the current token. This behavior provides -// for efficient DFA representation of lexer actions which appear at the end -// of a lexer rule, even when the lexer rule Matches a variable number of -// characters.
-// -//
Prior to traversing a Match transition in the ATN, the current offset -// from the token start index is assigned to all position-dependent lexer -// actions which have not already been assigned a fixed offset. By storing -// the offsets relative to the token start index, the DFA representation of -// lexer actions which appear in the middle of tokens remains efficient due -// to sharing among tokens of the same length, regardless of their absolute -// position in the input stream.
-// -//
If the current executor already has offsets assigned to all -// position-dependent lexer actions, the method returns {@code this}.
-// -// @param offset The current offset to assign to all position-dependent -// lexer actions which do not already have offsets assigned. -// -// @return A {@link LexerActionExecutor} which stores input stream offsets -// for all position-dependent lexer actions. -// / -func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { - var updatedLexerActions []LexerAction - for i := 0; i < len(l.lexerActions); i++ { - _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) - if l.lexerActions[i].getIsPositionDependent() && !ok { - if updatedLexerActions == nil { - updatedLexerActions = make([]LexerAction, 0) - - for _, a := range l.lexerActions { - updatedLexerActions = append(updatedLexerActions, a) - } - } - - updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) - } - } - if updatedLexerActions == nil { - return l - } - - return NewLexerActionExecutor(updatedLexerActions) -} - -// Execute the actions encapsulated by l executor within the context of a -// particular {@link Lexer}. -// -//
This method calls {@link IntStream//seek} to set the position of the -// {@code input} {@link CharStream} prior to calling -// {@link LexerAction//execute} on a position-dependent action. Before the -// method returns, the input position will be restored to the same position -// it was in when the method was invoked.
-// -// @param lexer The lexer instance. -// @param input The input stream which is the source for the current token. -// When l method is called, the current {@link IntStream//index} for -// {@code input} should be the start of the following token, i.e. 1 -// character past the end of the current token. -// @param startIndex The token start index. This value may be passed to -// {@link IntStream//seek} to set the {@code input} position to the beginning -// of the token. -// / -func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { - requiresSeek := false - stopIndex := input.Index() - - defer func() { - if requiresSeek { - input.Seek(stopIndex) - } - }() - - for i := 0; i < len(l.lexerActions); i++ { - lexerAction := l.lexerActions[i] - if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { - offset := la.offset - input.Seek(startIndex + offset) - lexerAction = la.lexerAction - requiresSeek = (startIndex + offset) != stopIndex - } else if lexerAction.getIsPositionDependent() { - input.Seek(stopIndex) - requiresSeek = false - } - lexerAction.execute(lexer) - } -} - -func (l *LexerActionExecutor) Hash() int { - if l == nil { - // TODO: Why is this here? l should not be nil - return 61 - } - - // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode - return l.cachedHash -} - -func (l *LexerActionExecutor) Equals(other interface{}) bool { - if l == other { - return true - } - othert, ok := other.(*LexerActionExecutor) - if !ok { - return false - } - if othert == nil { - return false - } - if l.cachedHash != othert.cachedHash { - return false - } - if len(l.lexerActions) != len(othert.lexerActions) { - return false - } - return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { - return i.Equals(j) - }) -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "golang.org/x/exp/slices" + +// Represents an executor for a sequence of lexer actions which traversed during +// the Matching operation of a lexer rule (token). +// +//
The executor tracks position information for position-dependent lexer actions +// efficiently, ensuring that actions appearing only at the end of the rule do +// not cause bloating of the {@link DFA} created for the lexer.
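A minimal sketch of the hash-caching strategy the constructor below uses, with a plain integer combiner standing in for the runtime's murmur helpers (toy names throughout, assumptions only):

```go
package main

import "fmt"

type lexerAction interface{ hash() int }

type channelAction struct{ channel int }

func (c channelAction) hash() int { return 0x636861<<4 ^ c.channel }

// executor precomputes a combined hash over its actions once, so the
// hot-path equality check can short-circuit on an integer comparison.
type executor struct {
	actions    []lexerAction
	cachedHash int
}

func newExecutor(actions []lexerAction) *executor {
	h := 57 // arbitrary seed, echoing the murmurInit(57) call in the real code
	for _, a := range actions {
		h = h*31 + a.hash() // toy combiner in place of murmurUpdate/murmurFinish
	}
	return &executor{actions: actions, cachedHash: h}
}

func main() {
	e := newExecutor([]lexerAction{channelAction{channel: 1}})
	fmt.Println(e.cachedHash)
}
```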
+ +type LexerActionExecutor struct { + lexerActions []LexerAction + cachedHash int +} + +func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { + + if lexerActions == nil { + lexerActions = make([]LexerAction, 0) + } + + l := new(LexerActionExecutor) + + l.lexerActions = lexerActions + + // Caches the result of {@link //hashCode} since the hash code is an element + // of the performance-critical {@link LexerATNConfig//hashCode} operation. + l.cachedHash = murmurInit(57) + for _, a := range lexerActions { + l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) + } + + return l +} + +// Creates a {@link LexerActionExecutor} which executes the actions for +// the input {@code lexerActionExecutor} followed by a specified +// {@code lexerAction}. +// +// @param lexerActionExecutor The executor for actions already traversed by +// the lexer while Matching a token within a particular +// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as +// though it were an empty executor. +// @param lexerAction The lexer action to execute after the actions +// specified in {@code lexerActionExecutor}. +// +// @return A {@link LexerActionExecutor} for executing the combine actions +// of {@code lexerActionExecutor} and {@code lexerAction}. +func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { + if lexerActionExecutor == nil { + return NewLexerActionExecutor([]LexerAction{lexerAction}) + } + + return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) +} + +// Creates a {@link LexerActionExecutor} which encodes the current offset +// for position-dependent lexer actions. +// +//
Normally, when the executor encounters lexer actions where +// {@link LexerAction//isPositionDependent} returns {@code true}, it calls +// {@link IntStream//seek} on the input {@link CharStream} to set the input +// position to the end of the current token. This behavior provides +// for efficient DFA representation of lexer actions which appear at the end +// of a lexer rule, even when the lexer rule Matches a variable number of +// characters.
+// +//
Prior to traversing a Match transition in the ATN, the current offset +// from the token start index is assigned to all position-dependent lexer +// actions which have not already been assigned a fixed offset. By storing +// the offsets relative to the token start index, the DFA representation of +// lexer actions which appear in the middle of tokens remains efficient due +// to sharing among tokens of the same length, regardless of their absolute +// position in the input stream.
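Under toy types and hypothetical names, the copy-on-first-change shape of the fixOffsetBeforeMatch method that follows looks roughly like this: position-dependent actions not yet carrying an offset get wrapped with the current one, and a nil result means nothing needed updating:

```go
package main

import "fmt"

type action interface{ isPositionDependent() bool }

// plain is a position-dependent action with no offset assigned yet.
type plain struct{}

func (plain) isPositionDependent() bool { return true }

// indexed is an action that already carries a fixed offset.
type indexed struct {
	offset int
	inner  action
}

func (indexed) isPositionDependent() bool { return true }

// fixOffset wraps every position-dependent action that is not already
// indexed; nil means "unchanged", which a caller would treat as keeping
// the existing executor.
func fixOffset(actions []action, offset int) []action {
	var updated []action
	for i, a := range actions {
		if _, ok := a.(indexed); a.isPositionDependent() && !ok {
			if updated == nil {
				updated = append([]action(nil), actions...) // copy on first change
			}
			updated[i] = indexed{offset: offset, inner: a}
		}
	}
	return updated
}

func main() {
	fmt.Printf("%#v\n", fixOffset([]action{plain{}}, 4))
}
```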
+// +//
If the current executor already has offsets assigned to all +// position-dependent lexer actions, the method returns {@code this}.
+// +// @param offset The current offset to assign to all position-dependent +// lexer actions which do not already have offsets assigned. +// +// @return A {@link LexerActionExecutor} which stores input stream offsets +// for all position-dependent lexer actions. +// / +func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { + var updatedLexerActions []LexerAction + for i := 0; i < len(l.lexerActions); i++ { + _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) + if l.lexerActions[i].getIsPositionDependent() && !ok { + if updatedLexerActions == nil { + updatedLexerActions = make([]LexerAction, 0) + + for _, a := range l.lexerActions { + updatedLexerActions = append(updatedLexerActions, a) + } + } + + updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) + } + } + if updatedLexerActions == nil { + return l + } + + return NewLexerActionExecutor(updatedLexerActions) +} + +// Execute the actions encapsulated by l executor within the context of a +// particular {@link Lexer}. +// +//
This method calls {@link IntStream//seek} to set the position of the +// {@code input} {@link CharStream} prior to calling +// {@link LexerAction//execute} on a position-dependent action. Before the +// method returns, the input position will be restored to the same position +// it was in when the method was invoked.
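The restore-on-return contract just described is implemented with a deferred Seek; schematically (toy stream type, hypothetical names, not the runtime's API):

```go
package main

import "fmt"

// stream is a toy stand-in for the runtime's CharStream.
type stream struct{ pos int }

func (s *stream) Index() int { return s.pos }
func (s *stream) Seek(i int) { s.pos = i }

// runAt seeks to startIndex+offset, runs fn, and guarantees on return that
// the stream is back at its original position, the same deferred-Seek
// pattern the execute method below uses.
func runAt(s *stream, startIndex, offset int, fn func()) {
	stop := s.Index()
	requiresSeek := startIndex+offset != stop
	defer func() {
		if requiresSeek {
			s.Seek(stop)
		}
	}()
	s.Seek(startIndex + offset)
	fn()
}

func main() {
	s := &stream{pos: 10}
	runAt(s, 2, 3, func() { fmt.Println("acting at", s.Index()) }) // acting at 5
	fmt.Println("restored to", s.Index())                          // restored to 10
}
```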
+// +// @param lexer The lexer instance. +// @param input The input stream which is the source for the current token. +// When l method is called, the current {@link IntStream//index} for +// {@code input} should be the start of the following token, i.e. 1 +// character past the end of the current token. +// @param startIndex The token start index. This value may be passed to +// {@link IntStream//seek} to set the {@code input} position to the beginning +// of the token. +// / +func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { + requiresSeek := false + stopIndex := input.Index() + + defer func() { + if requiresSeek { + input.Seek(stopIndex) + } + }() + + for i := 0; i < len(l.lexerActions); i++ { + lexerAction := l.lexerActions[i] + if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { + offset := la.offset + input.Seek(startIndex + offset) + lexerAction = la.lexerAction + requiresSeek = (startIndex + offset) != stopIndex + } else if lexerAction.getIsPositionDependent() { + input.Seek(stopIndex) + requiresSeek = false + } + lexerAction.execute(lexer) + } +} + +func (l *LexerActionExecutor) Hash() int { + if l == nil { + // TODO: Why is this here? l should not be nil + return 61 + } + + // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode + return l.cachedHash +} + +func (l *LexerActionExecutor) Equals(other interface{}) bool { + if l == other { + return true + } + othert, ok := other.(*LexerActionExecutor) + if !ok { + return false + } + if othert == nil { + return false + } + if l.cachedHash != othert.cachedHash { + return false + } + if len(l.lexerActions) != len(othert.lexerActions) { + return false + } + return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { + return i.Equals(j) + }) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go index c573b75210..e0299f9edf 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/lexer_atn_simulator.go @@ -1,684 +1,684 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - LexerATNSimulatorDebug = false - LexerATNSimulatorDFADebug = false - - LexerATNSimulatorMinDFAEdge = 0 - LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN - - LexerATNSimulatorMatchCalls = 0 -) - -type ILexerATNSimulator interface { - IATNSimulator - - reset() - Match(input CharStream, mode int) int - GetCharPositionInLine() int - GetLine() int - GetText(input CharStream) string - Consume(input CharStream) -} - -type LexerATNSimulator struct { - *BaseATNSimulator - - recog Lexer - predictionMode int - mergeCache DoubleDict - startIndex int - Line int - CharPositionInLine int - mode int - prevAccept *SimState - MatchCalls int -} - -func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { - l := new(LexerATNSimulator) - - l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - l.decisionToDFA = decisionToDFA - l.recog = recog - // The current token's starting index into the character stream. - // Shared across DFA to ATN simulation in case the ATN fails and the - // DFA did not have a previous accept state. In l case, we use the - // ATN-generated exception object. - l.startIndex = -1 - // line number 1..n within the input/// - l.Line = 1 - // The index of the character relative to the beginning of the line - // 0..n-1/// - l.CharPositionInLine = 0 - l.mode = LexerDefaultMode - // Used during DFA/ATN exec to record the most recent accept configuration - // info - l.prevAccept = NewSimState() - // done - return l -} - -func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) { - l.CharPositionInLine = simulator.CharPositionInLine - l.Line = simulator.Line - l.mode = simulator.mode - l.startIndex = simulator.startIndex -} - -func (l *LexerATNSimulator) Match(input CharStream, mode int) int { - l.MatchCalls++ - l.mode = mode - mark := input.Mark() - - defer func() { - input.Release(mark) - }() - - l.startIndex = input.Index() - l.prevAccept.reset() - - dfa := l.decisionToDFA[mode] - - var s0 *DFAState - l.atn.stateMu.RLock() - s0 = dfa.getS0() - l.atn.stateMu.RUnlock() - - if s0 == nil { - return l.MatchATN(input) - } - - return l.execATN(input, s0) -} - -func (l *LexerATNSimulator) reset() { - l.prevAccept.reset() - l.startIndex = -1 - l.Line = 1 - l.CharPositionInLine = 0 - l.mode = LexerDefaultMode -} - -func (l *LexerATNSimulator) MatchATN(input CharStream) int { - startState := l.atn.modeToStartState[l.mode] - - if LexerATNSimulatorDebug { - fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) - } - oldMode := l.mode - s0Closure := l.computeStartState(input, startState) - suppressEdge := s0Closure.hasSemanticContext - s0Closure.hasSemanticContext = false - - next := l.addDFAState(s0Closure, suppressEdge) - - predict := l.execATN(input, next) - - if LexerATNSimulatorDebug { - fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) - } - return predict -} - -func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { - - if LexerATNSimulatorDebug { - fmt.Println("start state closure=" + ds0.configs.String()) - } - if ds0.isAcceptState { - // allow zero-length tokens - l.captureSimState(l.prevAccept, input, ds0) - } - t := input.LA(1) - s := ds0 // s is current/from DFA state - - for { // while more work - if LexerATNSimulatorDebug { - fmt.Println("execATN loop starting closure: " + s.configs.String()) - } - - // As we move 
src->trg, src->trg, we keep track of the previous trg to - // avoid looking up the DFA state again, which is expensive. - // If the previous target was already part of the DFA, we might - // be able to avoid doing a reach operation upon t. If s!=nil, - // it means that semantic predicates didn't prevent us from - // creating a DFA state. Once we know s!=nil, we check to see if - // the DFA state has an edge already for t. If so, we can just reuse - // it's configuration set there's no point in re-computing it. - // This is kind of like doing DFA simulation within the ATN - // simulation because DFA simulation is really just a way to avoid - // computing reach/closure sets. Technically, once we know that - // we have a previously added DFA state, we could jump over to - // the DFA simulator. But, that would mean popping back and forth - // a lot and making things more complicated algorithmically. - // This optimization makes a lot of sense for loops within DFA. - // A character will take us back to an existing DFA state - // that already has lots of edges out of it. e.g., .* in comments. - target := l.getExistingTargetState(s, t) - if target == nil { - target = l.computeTargetState(input, s, t) - // print("Computed:" + str(target)) - } - if target == ATNSimulatorError { - break - } - // If l is a consumable input element, make sure to consume before - // capturing the accept state so the input index, line, and char - // position accurately reflect the state of the interpreter at the - // end of the token. - if t != TokenEOF { - l.Consume(input) - } - if target.isAcceptState { - l.captureSimState(l.prevAccept, input, target) - if t == TokenEOF { - break - } - } - t = input.LA(1) - s = target // flip current DFA target becomes Newsrc/from state - } - - return l.failOrAccept(l.prevAccept, input, s.configs, t) -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// l method returns {@code nil}. -// -// @param s The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for l edge is not -// already cached -func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { - if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge { - return nil - } - - l.atn.edgeMu.RLock() - defer l.atn.edgeMu.RUnlock() - if s.getEdges() == nil { - return nil - } - target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge) - if LexerATNSimulatorDebug && target != nil { - fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) - } - return target -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param input The input stream -// @param s The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, l method -// returns {@link //ERROR}. 
-func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { - reach := NewOrderedATNConfigSet() - - // if we don't find an existing DFA state - // Fill reach starting from closure, following t transitions - l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) - - if len(reach.configs) == 0 { // we got nowhere on t from s - if !reach.hasSemanticContext { - // we got nowhere on t, don't panic out l knowledge it'd - // cause a failover from DFA later. - l.addDFAEdge(s, t, ATNSimulatorError, nil) - } - // stop when we can't Match any more char - return ATNSimulatorError - } - // Add an edge from s to target DFA found/created for reach - return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) -} - -func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { - if l.prevAccept.dfaState != nil { - lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor - l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) - return prevAccept.dfaState.prediction - } - - // if no accept and EOF is first char, return EOF - if t == TokenEOF && input.Index() == l.startIndex { - return TokenEOF - } - - panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) -} - -// Given a starting configuration set, figure out all ATN configurations -// we can reach upon input {@code t}. Parameter {@code reach} is a return -// parameter. -func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { - // l is used to Skip processing for configs which have a lower priority - // than a config that already reached an accept state for the same rule - SkipAlt := ATNInvalidAltNumber - - for _, cfg := range closure.GetItems() { - currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) - if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { - continue - } - - if LexerATNSimulatorDebug { - - fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) - } - - for _, trans := range cfg.GetState().GetTransitions() { - target := l.getReachableTarget(trans, t) - if target != nil { - lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor - if lexerActionExecutor != nil { - lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) - } - treatEOFAsEpsilon := (t == TokenEOF) - config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) - if l.closure(input, config, reach, - currentAltReachedAcceptState, true, treatEOFAsEpsilon) { - // any remaining configs for l alt have a lower priority - // than the one that just reached an accept state. 
- SkipAlt = cfg.GetAlt() - } - } - } - } -} - -func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { - if LexerATNSimulatorDebug { - fmt.Printf("ACTION %v\n", lexerActionExecutor) - } - // seek to after last char in token - input.Seek(index) - l.Line = line - l.CharPositionInLine = charPos - if lexerActionExecutor != nil && l.recog != nil { - lexerActionExecutor.execute(l.recog, input, startIndex) - } -} - -func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState { - if trans.Matches(t, 0, LexerMaxCharValue) { - return trans.getTarget() - } - - return nil -} - -func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { - configs := NewOrderedATNConfigSet() - for i := 0; i < len(p.GetTransitions()); i++ { - target := p.GetTransitions()[i].getTarget() - cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY) - l.closure(input, cfg, configs, false, false, false) - } - - return configs -} - -// Since the alternatives within any lexer decision are ordered by -// preference, l method stops pursuing the closure as soon as an accept -// state is reached. After the first accept state is reached by depth-first -// search from {@code config}, all other (potentially reachable) states for -// l rule would have a lower priority. -// -// @return {@code true} if an accept state is reached, otherwise -// {@code false}. -func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, - currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { - - if LexerATNSimulatorDebug { - fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") - } - - _, ok := config.state.(*RuleStopState) - if ok { - - if LexerATNSimulatorDebug { - if l.recog != nil { - fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) - } else { - fmt.Printf("closure at rule stop %s\n", config) - } - } - - if config.context == nil || config.context.hasEmptyPath() { - if config.context == nil || config.context.isEmpty() { - configs.Add(config, nil) - return true - } - - configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil) - currentAltReachedAcceptState = true - } - if config.context != nil && !config.context.isEmpty() { - for i := 0; i < config.context.length(); i++ { - if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState { - newContext := config.context.GetParent(i) // "pop" return state - returnState := l.atn.states[config.context.getReturnState(i)] - cfg := NewLexerATNConfig2(config, returnState, newContext) - currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) - } - } - } - return currentAltReachedAcceptState - } - // optimization - if !config.state.GetEpsilonOnlyTransitions() { - if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision { - configs.Add(config, nil) - } - } - for j := 0; j < len(config.state.GetTransitions()); j++ { - trans := config.state.GetTransitions()[j] - cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon) - if cfg != nil { - currentAltReachedAcceptState = l.closure(input, cfg, configs, - currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) - } - } - return currentAltReachedAcceptState -} - -// side-effect: can alter configs.hasSemanticContext -func 
(l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, - configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { - - var cfg *LexerATNConfig - - if trans.getSerializationType() == TransitionRULE { - - rt := trans.(*RuleTransition) - newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber()) - cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) - - } else if trans.getSerializationType() == TransitionPRECEDENCE { - panic("Precedence predicates are not supported in lexers.") - } else if trans.getSerializationType() == TransitionPREDICATE { - // Track traversing semantic predicates. If we traverse, - // we cannot add a DFA state for l "reach" computation - // because the DFA would not test the predicate again in the - // future. Rather than creating collections of semantic predicates - // like v3 and testing them on prediction, v4 will test them on the - // fly all the time using the ATN not the DFA. This is slower but - // semantically it's not used that often. One of the key elements to - // l predicate mechanism is not adding DFA states that see - // predicates immediately afterwards in the ATN. For example, - - // a : ID {p1}? | ID {p2}? - - // should create the start state for rule 'a' (to save start state - // competition), but should not create target of ID state. The - // collection of ATN states the following ID references includes - // states reached by traversing predicates. Since l is when we - // test them, we cannot cash the DFA state target of ID. - - pt := trans.(*PredicateTransition) - - if LexerATNSimulatorDebug { - fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) - } - configs.SetHasSemanticContext(true) - if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } else if trans.getSerializationType() == TransitionACTION { - if config.context == nil || config.context.hasEmptyPath() { - // execute actions anywhere in the start rule for a token. - // - // TODO: if the entry rule is invoked recursively, some - // actions may be executed during the recursive call. The - // problem can appear when hasEmptyPath() is true but - // isEmpty() is false. In l case, the config needs to be - // split into two contexts - one with just the empty path - // and another with everything but the empty path. - // Unfortunately, the current algorithm does not allow - // getEpsilonTarget to return two configurations, so - // additional modifications are needed before we can support - // the split operation. - lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex]) - cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor) - } else { - // ignore actions in referenced rules - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } else if trans.getSerializationType() == TransitionEPSILON { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } else if trans.getSerializationType() == TransitionATOM || - trans.getSerializationType() == TransitionRANGE || - trans.getSerializationType() == TransitionSET { - if treatEOFAsEpsilon { - if trans.Matches(TokenEOF, 0, LexerMaxCharValue) { - cfg = NewLexerATNConfig4(config, trans.getTarget()) - } - } - } - return cfg -} - -// Evaluate a predicate specified in the lexer. -// -//
If {@code speculative} is {@code true}, this method was called before -// {@link //consume} for the Matched character. This method should call -// {@link //consume} before evaluating the predicate to ensure position-sensitive -// values, including {@link Lexer//GetText}, {@link Lexer//GetLine}, -// and {@link Lexer//getcolumn}, properly reflect the current -// lexer state. This method should restore {@code input} and the simulator -// to the original state before returning (i.e. undo the actions made by the -// call to {@link //consume}).
-// -// @param input The input stream. -// @param ruleIndex The rule containing the predicate. -// @param predIndex The index of the predicate within the rule. -// @param speculative {@code true} if the current index in {@code input} is -// one character before the predicate's location. -// -// @return {@code true} if the specified predicate evaluates to -// {@code true}. -// / -func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { - // assume true if no recognizer was provided - if l.recog == nil { - return true - } - if !speculative { - return l.recog.Sempred(nil, ruleIndex, predIndex) - } - savedcolumn := l.CharPositionInLine - savedLine := l.Line - index := input.Index() - marker := input.Mark() - - defer func() { - l.CharPositionInLine = savedcolumn - l.Line = savedLine - input.Seek(index) - input.Release(marker) - }() - - l.Consume(input) - return l.recog.Sempred(nil, ruleIndex, predIndex) -} - -func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) { - settings.index = input.Index() - settings.line = l.Line - settings.column = l.CharPositionInLine - settings.dfaState = dfaState -} - -func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { - if to == nil && cfgs != nil { - // leading to l call, ATNConfigSet.hasSemanticContext is used as a - // marker indicating dynamic predicate evaluation makes l edge - // dependent on the specific input sequence, so the static edge in the - // DFA should be omitted. The target DFAState is still created since - // execATN has the ability to reSynchronize with the DFA state cache - // following the predicate evaluation step. - // - // TJP notes: next time through the DFA, we see a pred again and eval. - // If that gets us to a previously created (but dangling) DFA - // state, we can continue in pure DFA mode from there. - // / - suppressEdge := cfgs.HasSemanticContext() - cfgs.SetHasSemanticContext(false) - - to = l.addDFAState(cfgs, true) - - if suppressEdge { - return to - } - } - // add the edge - if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge { - // Only track edges within the DFA bounds - return to - } - if LexerATNSimulatorDebug { - fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) - } - l.atn.edgeMu.Lock() - defer l.atn.edgeMu.Unlock() - if from.getEdges() == nil { - // make room for tokens 1..n and -1 masquerading as index 0 - from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1)) - } - from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect - - return to -} - -// Add a NewDFA state if there isn't one with l set of -// configurations already. This method also detects the first -// configuration containing an ATN rule stop state. Later, when -// traversing the DFA, we will know which rule to accept. 
-func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState { - - proposed := NewDFAState(-1, configs) - var firstConfigWithRuleStopState ATNConfig - - for _, cfg := range configs.GetItems() { - - _, ok := cfg.GetState().(*RuleStopState) - - if ok { - firstConfigWithRuleStopState = cfg - break - } - } - if firstConfigWithRuleStopState != nil { - proposed.isAcceptState = true - proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor - proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) - } - dfa := l.decisionToDFA[l.mode] - - l.atn.stateMu.Lock() - defer l.atn.stateMu.Unlock() - existing, present := dfa.states.Get(proposed) - if present { - - // This state was already present, so just return it. - // - proposed = existing - } else { - - // We need to add the new state - // - proposed.stateNumber = dfa.states.Len() - configs.SetReadOnly(true) - proposed.configs = configs - dfa.states.Put(proposed) - } - if !suppressEdge { - dfa.setS0(proposed) - } - return proposed -} - -func (l *LexerATNSimulator) getDFA(mode int) *DFA { - return l.decisionToDFA[mode] -} - -// Get the text Matched so far for the current token. -func (l *LexerATNSimulator) GetText(input CharStream) string { - // index is first lookahead char, don't include. - return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) -} - -func (l *LexerATNSimulator) Consume(input CharStream) { - curChar := input.LA(1) - if curChar == int('\n') { - l.Line++ - l.CharPositionInLine = 0 - } else { - l.CharPositionInLine++ - } - input.Consume() -} - -func (l *LexerATNSimulator) GetCharPositionInLine() int { - return l.CharPositionInLine -} - -func (l *LexerATNSimulator) GetLine() int { - return l.Line -} - -func (l *LexerATNSimulator) GetTokenName(tt int) string { - if tt == -1 { - return "EOF" - } - - var sb strings.Builder - sb.Grow(6) - sb.WriteByte('\'') - sb.WriteRune(rune(tt)) - sb.WriteByte('\'') - - return sb.String() -} - -func resetSimState(sim *SimState) { - sim.index = -1 - sim.line = 0 - sim.column = -1 - sim.dfaState = nil -} - -type SimState struct { - index int - line int - column int - dfaState *DFAState -} - -func NewSimState() *SimState { - s := new(SimState) - resetSimState(s) - return s -} - -func (s *SimState) reset() { - resetSimState(s) -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + LexerATNSimulatorDebug = false + LexerATNSimulatorDFADebug = false + + LexerATNSimulatorMinDFAEdge = 0 + LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN + + LexerATNSimulatorMatchCalls = 0 +) + +type ILexerATNSimulator interface { + IATNSimulator + + reset() + Match(input CharStream, mode int) int + GetCharPositionInLine() int + GetLine() int + GetText(input CharStream) string + Consume(input CharStream) +} + +type LexerATNSimulator struct { + *BaseATNSimulator + + recog Lexer + predictionMode int + mergeCache DoubleDict + startIndex int + Line int + CharPositionInLine int + mode int + prevAccept *SimState + MatchCalls int +} + +func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { + l := new(LexerATNSimulator) + + l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + + l.decisionToDFA = decisionToDFA + l.recog = recog + // The current token's starting index into the character stream. + // Shared across DFA to ATN simulation in case the ATN fails and the + // DFA did not have a previous accept state. In l case, we use the + // ATN-generated exception object. + l.startIndex = -1 + // line number 1..n within the input/// + l.Line = 1 + // The index of the character relative to the beginning of the line + // 0..n-1/// + l.CharPositionInLine = 0 + l.mode = LexerDefaultMode + // Used during DFA/ATN exec to record the most recent accept configuration + // info + l.prevAccept = NewSimState() + // done + return l +} + +func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) { + l.CharPositionInLine = simulator.CharPositionInLine + l.Line = simulator.Line + l.mode = simulator.mode + l.startIndex = simulator.startIndex +} + +func (l *LexerATNSimulator) Match(input CharStream, mode int) int { + l.MatchCalls++ + l.mode = mode + mark := input.Mark() + + defer func() { + input.Release(mark) + }() + + l.startIndex = input.Index() + l.prevAccept.reset() + + dfa := l.decisionToDFA[mode] + + var s0 *DFAState + l.atn.stateMu.RLock() + s0 = dfa.getS0() + l.atn.stateMu.RUnlock() + + if s0 == nil { + return l.MatchATN(input) + } + + return l.execATN(input, s0) +} + +func (l *LexerATNSimulator) reset() { + l.prevAccept.reset() + l.startIndex = -1 + l.Line = 1 + l.CharPositionInLine = 0 + l.mode = LexerDefaultMode +} + +func (l *LexerATNSimulator) MatchATN(input CharStream) int { + startState := l.atn.modeToStartState[l.mode] + + if LexerATNSimulatorDebug { + fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) + } + oldMode := l.mode + s0Closure := l.computeStartState(input, startState) + suppressEdge := s0Closure.hasSemanticContext + s0Closure.hasSemanticContext = false + + next := l.addDFAState(s0Closure, suppressEdge) + + predict := l.execATN(input, next) + + if LexerATNSimulatorDebug { + fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) + } + return predict +} + +func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { + + if LexerATNSimulatorDebug { + fmt.Println("start state closure=" + ds0.configs.String()) + } + if ds0.isAcceptState { + // allow zero-length tokens + l.captureSimState(l.prevAccept, input, ds0) + } + t := input.LA(1) + s := ds0 // s is current/from DFA state + + for { // while more work + if LexerATNSimulatorDebug { + fmt.Println("execATN loop starting closure: " + s.configs.String()) + } + + // As we move 
src->trg, src->trg, we keep track of the previous trg to + // avoid looking up the DFA state again, which is expensive. + // If the previous target was already part of the DFA, we might + // be able to avoid doing a reach operation upon t. If s!=nil, + // it means that semantic predicates didn't prevent us from + // creating a DFA state. Once we know s!=nil, we check to see if + // the DFA state has an edge already for t. If so, we can just reuse + // it's configuration set there's no point in re-computing it. + // This is kind of like doing DFA simulation within the ATN + // simulation because DFA simulation is really just a way to avoid + // computing reach/closure sets. Technically, once we know that + // we have a previously added DFA state, we could jump over to + // the DFA simulator. But, that would mean popping back and forth + // a lot and making things more complicated algorithmically. + // This optimization makes a lot of sense for loops within DFA. + // A character will take us back to an existing DFA state + // that already has lots of edges out of it. e.g., .* in comments. + target := l.getExistingTargetState(s, t) + if target == nil { + target = l.computeTargetState(input, s, t) + // print("Computed:" + str(target)) + } + if target == ATNSimulatorError { + break + } + // If l is a consumable input element, make sure to consume before + // capturing the accept state so the input index, line, and char + // position accurately reflect the state of the interpreter at the + // end of the token. + if t != TokenEOF { + l.Consume(input) + } + if target.isAcceptState { + l.captureSimState(l.prevAccept, input, target) + if t == TokenEOF { + break + } + } + t = input.LA(1) + s = target // flip current DFA target becomes Newsrc/from state + } + + return l.failOrAccept(l.prevAccept, input, s.configs, t) +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// l method returns {@code nil}. +// +// @param s The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for l edge is not +// already cached +func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { + if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge { + return nil + } + + l.atn.edgeMu.RLock() + defer l.atn.edgeMu.RUnlock() + if s.getEdges() == nil { + return nil + } + target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge) + if LexerATNSimulatorDebug && target != nil { + fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) + } + return target +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param input The input stream +// @param s The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, l method +// returns {@link //ERROR}. 
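A compact illustration of the edge cache that getExistingTargetState (above) and addDFAEdge (further below) manage, using a toy state type; the real bounds are LexerATNSimulatorMinDFAEdge and LexerATNSimulatorMaxDFAEdge:

```go
package main

import "fmt"

const minEdge, maxEdge = 0, 127 // symbols outside this range stay in the ATN

type dfaState struct {
	id    int
	edges []*dfaState // lazily allocated, indexed by symbol-minEdge
}

// target returns the cached successor for symbol t, or nil when the edge
// is out of range or not yet computed, like getExistingTargetState.
func (s *dfaState) target(t int) *dfaState {
	if t < minEdge || t > maxEdge || s.edges == nil {
		return nil
	}
	return s.edges[t-minEdge]
}

// connect records an edge, allocating the full edge array on first use,
// like addDFAEdge.
func (s *dfaState) connect(t int, to *dfaState) {
	if t < minEdge || t > maxEdge {
		return
	}
	if s.edges == nil {
		s.edges = make([]*dfaState, maxEdge-minEdge+1)
	}
	s.edges[t-minEdge] = to
}

func main() {
	a, b := &dfaState{id: 0}, &dfaState{id: 1}
	a.connect('x', b)
	fmt.Println(a.target('x').id, a.target('y')) // 1 <nil>
}
```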
+func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { + reach := NewOrderedATNConfigSet() + + // if we don't find an existing DFA state + // Fill reach starting from closure, following t transitions + l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) + + if len(reach.configs) == 0 { // we got nowhere on t from s + if !reach.hasSemanticContext { + // we got nowhere on t, don't panic out l knowledge it'd + // cause a failover from DFA later. + l.addDFAEdge(s, t, ATNSimulatorError, nil) + } + // stop when we can't Match any more char + return ATNSimulatorError + } + // Add an edge from s to target DFA found/created for reach + return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) +} + +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { + if l.prevAccept.dfaState != nil { + lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor + l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) + return prevAccept.dfaState.prediction + } + + // if no accept and EOF is first char, return EOF + if t == TokenEOF && input.Index() == l.startIndex { + return TokenEOF + } + + panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) +} + +// Given a starting configuration set, figure out all ATN configurations +// we can reach upon input {@code t}. Parameter {@code reach} is a return +// parameter. +func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { + // l is used to Skip processing for configs which have a lower priority + // than a config that already reached an accept state for the same rule + SkipAlt := ATNInvalidAltNumber + + for _, cfg := range closure.GetItems() { + currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) + if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { + continue + } + + if LexerATNSimulatorDebug { + + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) + } + + for _, trans := range cfg.GetState().GetTransitions() { + target := l.getReachableTarget(trans, t) + if target != nil { + lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor + if lexerActionExecutor != nil { + lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) + } + treatEOFAsEpsilon := (t == TokenEOF) + config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) + if l.closure(input, config, reach, + currentAltReachedAcceptState, true, treatEOFAsEpsilon) { + // any remaining configs for l alt have a lower priority + // than the one that just reached an accept state. 
+ SkipAlt = cfg.GetAlt() + } + } + } + } +} + +func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { + if LexerATNSimulatorDebug { + fmt.Printf("ACTION %v\n", lexerActionExecutor) + } + // seek to after last char in token + input.Seek(index) + l.Line = line + l.CharPositionInLine = charPos + if lexerActionExecutor != nil && l.recog != nil { + lexerActionExecutor.execute(l.recog, input, startIndex) + } +} + +func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState { + if trans.Matches(t, 0, LexerMaxCharValue) { + return trans.getTarget() + } + + return nil +} + +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { + configs := NewOrderedATNConfigSet() + for i := 0; i < len(p.GetTransitions()); i++ { + target := p.GetTransitions()[i].getTarget() + cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY) + l.closure(input, cfg, configs, false, false, false) + } + + return configs +} + +// Since the alternatives within any lexer decision are ordered by +// preference, l method stops pursuing the closure as soon as an accept +// state is reached. After the first accept state is reached by depth-first +// search from {@code config}, all other (potentially reachable) states for +// l rule would have a lower priority. +// +// @return {@code true} if an accept state is reached, otherwise +// {@code false}. +func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { + + if LexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") + } + + _, ok := config.state.(*RuleStopState) + if ok { + + if LexerATNSimulatorDebug { + if l.recog != nil { + fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) + } else { + fmt.Printf("closure at rule stop %s\n", config) + } + } + + if config.context == nil || config.context.hasEmptyPath() { + if config.context == nil || config.context.isEmpty() { + configs.Add(config, nil) + return true + } + + configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil) + currentAltReachedAcceptState = true + } + if config.context != nil && !config.context.isEmpty() { + for i := 0; i < config.context.length(); i++ { + if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState { + newContext := config.context.GetParent(i) // "pop" return state + returnState := l.atn.states[config.context.getReturnState(i)] + cfg := NewLexerATNConfig2(config, returnState, newContext) + currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + } + return currentAltReachedAcceptState + } + // optimization + if !config.state.GetEpsilonOnlyTransitions() { + if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision { + configs.Add(config, nil) + } + } + for j := 0; j < len(config.state.GetTransitions()); j++ { + trans := config.state.GetTransitions()[j] + cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon) + if cfg != nil { + currentAltReachedAcceptState = l.closure(input, cfg, configs, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + return currentAltReachedAcceptState +} + +// side-effect: can alter configs.hasSemanticContext +func 
(l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, + configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { + + var cfg *LexerATNConfig + + if trans.getSerializationType() == TransitionRULE { + + rt := trans.(*RuleTransition) + newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber()) + cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) + + } else if trans.getSerializationType() == TransitionPRECEDENCE { + panic("Precedence predicates are not supported in lexers.") + } else if trans.getSerializationType() == TransitionPREDICATE { + // Track traversing semantic predicates. If we traverse, + // we cannot add a DFA state for l "reach" computation + // because the DFA would not test the predicate again in the + // future. Rather than creating collections of semantic predicates + // like v3 and testing them on prediction, v4 will test them on the + // fly all the time using the ATN not the DFA. This is slower but + // semantically it's not used that often. One of the key elements to + // l predicate mechanism is not adding DFA states that see + // predicates immediately afterwards in the ATN. For example, + + // a : ID {p1}? | ID {p2}? + + // should create the start state for rule 'a' (to save start state + // competition), but should not create target of ID state. The + // collection of ATN states the following ID references includes + // states reached by traversing predicates. Since l is when we + // test them, we cannot cash the DFA state target of ID. + + pt := trans.(*PredicateTransition) + + if LexerATNSimulatorDebug { + fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) + } + configs.SetHasSemanticContext(true) + if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionACTION { + if config.context == nil || config.context.hasEmptyPath() { + // execute actions anywhere in the start rule for a token. + // + // TODO: if the entry rule is invoked recursively, some + // actions may be executed during the recursive call. The + // problem can appear when hasEmptyPath() is true but + // isEmpty() is false. In l case, the config needs to be + // split into two contexts - one with just the empty path + // and another with everything but the empty path. + // Unfortunately, the current algorithm does not allow + // getEpsilonTarget to return two configurations, so + // additional modifications are needed before we can support + // the split operation. + lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex]) + cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor) + } else { + // ignore actions in referenced rules + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionEPSILON { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } else if trans.getSerializationType() == TransitionATOM || + trans.getSerializationType() == TransitionRANGE || + trans.getSerializationType() == TransitionSET { + if treatEOFAsEpsilon { + if trans.Matches(TokenEOF, 0, LexerMaxCharValue) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } + } + return cfg +} + +// Evaluate a predicate specified in the lexer. +// +//
If {@code speculative} is {@code true}, this method was called before +// {@link //consume} for the Matched character. This method should call +// {@link //consume} before evaluating the predicate to ensure position-sensitive +// values, including {@link Lexer//GetText}, {@link Lexer//GetLine}, +// and {@link Lexer//getcolumn}, properly reflect the current +// lexer state. This method should restore {@code input} and the simulator +// to the original state before returning (i.e. undo the actions made by the +// call to {@link //consume}).
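Schematically, the save/consume/evaluate/restore dance described above looks like this (toy position struct, hypothetical names):

```go
package main

import "fmt"

// lexPos is a toy bundle of the position state the simulator saves.
type lexPos struct {
	index, line, col int
}

// speculate consumes one character so position-sensitive predicate code
// sees the matched character, evaluates pred, then restores all saved
// state via defer, the shape of evaluatePredicate below.
func speculate(p *lexPos, consume func(*lexPos), pred func() bool) bool {
	saved := *p
	defer func() { *p = saved }()
	consume(p)
	return pred()
}

func main() {
	p := &lexPos{index: 5, line: 1, col: 5}
	ok := speculate(p, func(q *lexPos) { q.index++; q.col++ }, func() bool { return true })
	fmt.Println(ok, *p) // true {5 1 5}
}
```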
+// +// @param input The input stream. +// @param ruleIndex The rule containing the predicate. +// @param predIndex The index of the predicate within the rule. +// @param speculative {@code true} if the current index in {@code input} is +// one character before the predicate's location. +// +// @return {@code true} if the specified predicate evaluates to +// {@code true}. +// / +func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { + // assume true if no recognizer was provided + if l.recog == nil { + return true + } + if !speculative { + return l.recog.Sempred(nil, ruleIndex, predIndex) + } + savedcolumn := l.CharPositionInLine + savedLine := l.Line + index := input.Index() + marker := input.Mark() + + defer func() { + l.CharPositionInLine = savedcolumn + l.Line = savedLine + input.Seek(index) + input.Release(marker) + }() + + l.Consume(input) + return l.recog.Sempred(nil, ruleIndex, predIndex) +} + +func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) { + settings.index = input.Index() + settings.line = l.Line + settings.column = l.CharPositionInLine + settings.dfaState = dfaState +} + +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { + if to == nil && cfgs != nil { + // leading to l call, ATNConfigSet.hasSemanticContext is used as a + // marker indicating dynamic predicate evaluation makes l edge + // dependent on the specific input sequence, so the static edge in the + // DFA should be omitted. The target DFAState is still created since + // execATN has the ability to reSynchronize with the DFA state cache + // following the predicate evaluation step. + // + // TJP notes: next time through the DFA, we see a pred again and eval. + // If that gets us to a previously created (but dangling) DFA + // state, we can continue in pure DFA mode from there. + // / + suppressEdge := cfgs.HasSemanticContext() + cfgs.SetHasSemanticContext(false) + + to = l.addDFAState(cfgs, true) + + if suppressEdge { + return to + } + } + // add the edge + if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge { + // Only track edges within the DFA bounds + return to + } + if LexerATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) + } + l.atn.edgeMu.Lock() + defer l.atn.edgeMu.Unlock() + if from.getEdges() == nil { + // make room for tokens 1..n and -1 masquerading as index 0 + from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1)) + } + from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect + + return to +} + +// Add a NewDFA state if there isn't one with l set of +// configurations already. This method also detects the first +// configuration containing an ATN rule stop state. Later, when +// traversing the DFA, we will know which rule to accept. 
+func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState { + + proposed := NewDFAState(-1, configs) + var firstConfigWithRuleStopState ATNConfig + + for _, cfg := range configs.GetItems() { + + _, ok := cfg.GetState().(*RuleStopState) + + if ok { + firstConfigWithRuleStopState = cfg + break + } + } + if firstConfigWithRuleStopState != nil { + proposed.isAcceptState = true + proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor + proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) + } + dfa := l.decisionToDFA[l.mode] + + l.atn.stateMu.Lock() + defer l.atn.stateMu.Unlock() + existing, present := dfa.states.Get(proposed) + if present { + + // This state was already present, so just return it. + // + proposed = existing + } else { + + // We need to add the new state + // + proposed.stateNumber = dfa.states.Len() + configs.SetReadOnly(true) + proposed.configs = configs + dfa.states.Put(proposed) + } + if !suppressEdge { + dfa.setS0(proposed) + } + return proposed +} + +func (l *LexerATNSimulator) getDFA(mode int) *DFA { + return l.decisionToDFA[mode] +} + +// Get the text Matched so far for the current token. +func (l *LexerATNSimulator) GetText(input CharStream) string { + // index is first lookahead char, don't include. + return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) +} + +func (l *LexerATNSimulator) Consume(input CharStream) { + curChar := input.LA(1) + if curChar == int('\n') { + l.Line++ + l.CharPositionInLine = 0 + } else { + l.CharPositionInLine++ + } + input.Consume() +} + +func (l *LexerATNSimulator) GetCharPositionInLine() int { + return l.CharPositionInLine +} + +func (l *LexerATNSimulator) GetLine() int { + return l.Line +} + +func (l *LexerATNSimulator) GetTokenName(tt int) string { + if tt == -1 { + return "EOF" + } + + var sb strings.Builder + sb.Grow(6) + sb.WriteByte('\'') + sb.WriteRune(rune(tt)) + sb.WriteByte('\'') + + return sb.String() +} + +func resetSimState(sim *SimState) { + sim.index = -1 + sim.line = 0 + sim.column = -1 + sim.dfaState = nil +} + +type SimState struct { + index int + line int + column int + dfaState *DFAState +} + +func NewSimState() *SimState { + s := new(SimState) + resetSimState(s) + return s +} + +func (s *SimState) reset() { + resetSimState(s) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go index 76689615a6..50a617fde6 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/ll1_analyzer.go @@ -1,216 +1,216 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type LL1Analyzer struct { - atn *ATN -} - -func NewLL1Analyzer(atn *ATN) *LL1Analyzer { - la := new(LL1Analyzer) - la.atn = atn - return la -} - -// - Special value added to the lookahead sets to indicate that we hit -// a predicate during analysis if {@code seeThruPreds==false}. -// -// / -const ( - LL1AnalyzerHitPred = TokenInvalidType -) - -// * -// Calculates the SLL(1) expected lookahead set for each outgoing transition -// of an {@link ATNState}. The returned array has one element for each -// outgoing transition in {@code s}. 
If the closure from transition -// i leads to a semantic predicate before Matching a symbol, the -// element at index i of the result will be {@code nil}. -// -// @param s the ATN state -// @return the expected symbols for each outgoing transition of {@code s}. -func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { - if s == nil { - return nil - } - count := len(s.GetTransitions()) - look := make([]*IntervalSet, count) - for alt := 0; alt < count; alt++ { - look[alt] = NewIntervalSet() - lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - seeThruPreds := false // fail to get lookahead upon pred - la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) - // Wipe out lookahead for la alternative if we found nothing - // or we had a predicate when we !seeThruPreds - if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { - look[alt] = nil - } - } - return look -} - -// * -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

If {@code ctx} is {@code nil} and the end of the rule containing -// {@code s} is reached, {@link Token//EPSILON} is added to the result set. -// If {@code ctx} is not {@code nil} and the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.

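The paragraph above encodes a small contract: an unknown caller context contributes EPSILON to the follow set, while reaching the end of the outermost rule contributes EOF. A toy model of just those two cases, with invented names and no real ATN behind it:

```go
package main

import "fmt"

const (
	tokEpsilon = -2
	tokEOF     = -1
)

// follow returns what may follow the end of a rule, given a stack of
// return positions (innermost last). An empty stack stands for "caller
// context unknown"; a position with no recorded follow tokens stands
// for the end of the outermost rule.
func follow(returnStack []string, followSets map[string][]int) []int {
	if len(returnStack) == 0 {
		return []int{tokEpsilon} // ctx == nil: caller unknown
	}
	top := returnStack[len(returnStack)-1]
	if toks, ok := followSets[top]; ok {
		return toks
	}
	return []int{tokEOF} // end of the outermost rule
}

func main() {
	sets := map[string][]int{"afterExpr": {'+', ')'}}
	fmt.Println(follow(nil, sets))                   // [-2]: epsilon
	fmt.Println(follow([]string{"afterExpr"}, sets)) // [43 41]
	fmt.Println(follow([]string{"outermost"}, sets)) // [-1]: EOF
}
```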
-// -// @param s the ATN state -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx the complete parser context, or {@code nil} if the context -// should be ignored -// -// @return The set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// / -func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { - r := NewIntervalSet() - seeThruPreds := true // ignore preds get all lookahead - var lookContext PredictionContext - if ctx != nil { - lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) - } - la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true) - return r -} - -//* -// Compute set of tokens that can follow {@code s} in the ATN in the -// specified {@code ctx}. -// -//

If {@code ctx} is {@code nil} and {@code stopState} or the end of the -// rule containing {@code s} is reached, {@link Token//EPSILON} is added to -// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is -// {@code true} and {@code stopState} or the end of the outermost rule is -// reached, {@link Token//EOF} is added to the result set.

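The lookBusy parameter documented below exists to stop epsilon closures in the ATN from recursing forever. A self-contained sketch of that guard, using a visited set keyed by (state, context); the graph here is hypothetical:

```go
package main

import "fmt"

// key identifies one configuration, loosely mirroring the
// (state, prediction context) pairs stored in lookBusy.
type key struct {
	state int
	ctx   string
}

func walk(state int, ctx string, edges map[int][]int, busy map[key]bool, out *[]int) {
	k := key{state, ctx}
	if busy[k] {
		return // already expanding this configuration: break the cycle
	}
	busy[k] = true
	*out = append(*out, state)
	for _, next := range edges[state] {
		walk(next, ctx, edges, busy, out)
	}
}

func main() {
	edges := map[int][]int{0: {1}, 1: {2}, 2: {0}} // epsilon cycle 0->1->2->0
	var out []int
	walk(0, "", edges, map[key]bool{}, &out)
	fmt.Println(out) // [0 1 2]: terminates despite the cycle
}
```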
-// -// @param s the ATN state. -// @param stopState the ATN state to stop at. This can be a -// {@link BlockEndState} to detect epsilon paths through a closure. -// @param ctx The outer context, or {@code nil} if the outer context should -// not be used. -// @param look The result lookahead set. -// @param lookBusy A set used for preventing epsilon closures in the ATN -// from causing a stack overflow. Outside code should pass -// {@code NewSet} for la argument. -// @param calledRuleStack A set used for preventing left recursion in the -// ATN from causing a stack overflow. Outside code should pass -// {@code NewBitSet()} for la argument. -// @param seeThruPreds {@code true} to true semantic predicates as -// implicitly {@code true} and "see through them", otherwise {@code false} -// to treat semantic predicates as opaque and add {@link //HitPred} to the -// result if one is encountered. -// @param addEOF Add {@link Token//EOF} to the result if the end of the -// outermost context is reached. This parameter has no effect if {@code ctx} -// is {@code nil}. - -func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { - - returnState := la.atn.states[ctx.getReturnState(i)] - la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} - -func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { - - c := NewBaseATNConfig6(s, 0, ctx) - - if lookBusy.Contains(c) { - return - } - - _, present := lookBusy.Put(c) - if present { - return - - } - if s == stopState { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - } - - _, ok := s.(*RuleStopState) - - if ok { - if ctx == nil { - look.addOne(TokenEpsilon) - return - } else if ctx.isEmpty() && addEOF { - look.addOne(TokenEOF) - return - } - - if ctx != BasePredictionContextEMPTY { - removed := calledRuleStack.contains(s.GetRuleIndex()) - defer func() { - if removed { - calledRuleStack.add(s.GetRuleIndex()) - } - }() - calledRuleStack.remove(s.GetRuleIndex()) - // run thru all possible stack tops in ctx - for i := 0; i < ctx.length(); i++ { - returnState := la.atn.states[ctx.getReturnState(i)] - la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) - } - return - } - } - - n := len(s.GetTransitions()) - - for i := 0; i < n; i++ { - t := s.GetTransitions()[i] - - if t1, ok := t.(*RuleTransition); ok { - if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { - continue - } - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) - } else if t2, ok := t.(AbstractPredicateTransition); ok { - if seeThruPreds { - la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } else { - look.addOne(LL1AnalyzerHitPred) - } - } else if t.getIsEpsilon() { - la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - } else if _, ok := t.(*WildcardTransition); ok { - look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) - } else { - set := t.getLabel() - if set != nil { - if _, ok := 
t.(*NotSetTransition); ok { - set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) - } - look.addSet(set) - } - } - } -} - -func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { - - newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) - - defer func() { - calledRuleStack.remove(t1.getTarget().GetRuleIndex()) - }() - - calledRuleStack.add(t1.getTarget().GetRuleIndex()) - la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) - -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type LL1Analyzer struct { + atn *ATN +} + +func NewLL1Analyzer(atn *ATN) *LL1Analyzer { + la := new(LL1Analyzer) + la.atn = atn + return la +} + +// - Special value added to the lookahead sets to indicate that we hit +// a predicate during analysis if {@code seeThruPreds==false}. +// +// / +const ( + LL1AnalyzerHitPred = TokenInvalidType +) + +// * +// Calculates the SLL(1) expected lookahead set for each outgoing transition +// of an {@link ATNState}. The returned array has one element for each +// outgoing transition in {@code s}. If the closure from transition +// i leads to a semantic predicate before Matching a symbol, the +// element at index i of the result will be {@code nil}. +// +// @param s the ATN state +// @return the expected symbols for each outgoing transition of {@code s}. +func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { + if s == nil { + return nil + } + count := len(s.GetTransitions()) + look := make([]*IntervalSet, count) + for alt := 0; alt < count; alt++ { + look[alt] = NewIntervalSet() + lookBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) + seeThruPreds := false // fail to get lookahead upon pred + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) + // Wipe out lookahead for la alternative if we found nothing + // or we had a predicate when we !seeThruPreds + if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { + look[alt] = nil + } + } + return look +} + +// * +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//

If {@code ctx} is {@code nil} and the end of the rule containing +// {@code s} is reached, {@link Token//EPSILON} is added to the result set. +// If {@code ctx} is not {@code nil} and the end of the outermost rule is +// reached, {@link Token//EOF} is added to the result set.

+// +// @param s the ATN state +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx the complete parser context, or {@code nil} if the context +// should be ignored +// +// @return The set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// / +func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { + r := NewIntervalSet() + seeThruPreds := true // ignore preds get all lookahead + var lookContext PredictionContext + if ctx != nil { + lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) + } + la.look1(s, stopState, lookContext, r, NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst), NewBitSet(), seeThruPreds, true) + return r +} + +//* +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//

If {@code ctx} is {@code nil} and {@code stopState} or the end of the +// rule containing {@code s} is reached, {@link Token//EPSILON} is added to +// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is +// {@code true} and {@code stopState} or the end of the outermost rule is +// reached, {@link Token//EOF} is added to the result set.

+// +// @param s the ATN state. +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx The outer context, or {@code nil} if the outer context should +// not be used. +// @param look The result lookahead set. +// @param lookBusy A set used for preventing epsilon closures in the ATN +// from causing a stack overflow. Outside code should pass +// {@code NewSet} for la argument. +// @param calledRuleStack A set used for preventing left recursion in the +// ATN from causing a stack overflow. Outside code should pass +// {@code NewBitSet()} for la argument. +// @param seeThruPreds {@code true} to true semantic predicates as +// implicitly {@code true} and "see through them", otherwise {@code false} +// to treat semantic predicates as opaque and add {@link //HitPred} to the +// result if one is encountered. +// @param addEOF Add {@link Token//EOF} to the result if the end of the +// outermost context is reached. This parameter has no effect if {@code ctx} +// is {@code nil}. + +func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { + + returnState := la.atn.states[ctx.getReturnState(i)] + la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} + +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { + + c := NewBaseATNConfig6(s, 0, ctx) + + if lookBusy.Contains(c) { + return + } + + _, present := lookBusy.Put(c) + if present { + return + + } + if s == stopState { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + } + + _, ok := s.(*RuleStopState) + + if ok { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + + if ctx != BasePredictionContextEMPTY { + removed := calledRuleStack.contains(s.GetRuleIndex()) + defer func() { + if removed { + calledRuleStack.add(s.GetRuleIndex()) + } + }() + calledRuleStack.remove(s.GetRuleIndex()) + // run thru all possible stack tops in ctx + for i := 0; i < ctx.length(); i++ { + returnState := la.atn.states[ctx.getReturnState(i)] + la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) + } + return + } + } + + n := len(s.GetTransitions()) + + for i := 0; i < n; i++ { + t := s.GetTransitions()[i] + + if t1, ok := t.(*RuleTransition); ok { + if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { + continue + } + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) + } else if t2, ok := t.(AbstractPredicateTransition); ok { + if seeThruPreds { + la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else { + look.addOne(LL1AnalyzerHitPred) + } + } else if t.getIsEpsilon() { + la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else if _, ok := t.(*WildcardTransition); ok { + look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) + } else { + set := t.getLabel() + if set != nil { + if _, ok := 
t.(*NotSetTransition); ok { + set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) + } + look.addSet(set) + } + } + } +} + +func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *JStore[ATNConfig, Comparator[ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + + defer func() { + calledRuleStack.remove(t1.getTarget().GetRuleIndex()) + }() + + calledRuleStack.add(t1.getTarget().GetRuleIndex()) + la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go index d26bf06392..bd7cac50e6 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser.go @@ -1,708 +1,708 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -type Parser interface { - Recognizer - - GetInterpreter() *ParserATNSimulator - - GetTokenStream() TokenStream - GetTokenFactory() TokenFactory - GetParserRuleContext() ParserRuleContext - SetParserRuleContext(ParserRuleContext) - Consume() Token - GetParseListeners() []ParseTreeListener - - GetErrorHandler() ErrorStrategy - SetErrorHandler(ErrorStrategy) - GetInputStream() IntStream - GetCurrentToken() Token - GetExpectedTokens() *IntervalSet - NotifyErrorListeners(string, Token, RecognitionException) - IsExpectedToken(int) bool - GetPrecedence() int - GetRuleInvocationStack(ParserRuleContext) []string -} - -type BaseParser struct { - *BaseRecognizer - - Interpreter *ParserATNSimulator - BuildParseTrees bool - - input TokenStream - errHandler ErrorStrategy - precedenceStack IntStack - ctx ParserRuleContext - - tracer *TraceListener - parseListeners []ParseTreeListener - _SyntaxErrors int -} - -// p.is all the parsing support code essentially most of it is error -// recovery stuff.// -func NewBaseParser(input TokenStream) *BaseParser { - - p := new(BaseParser) - - p.BaseRecognizer = NewBaseRecognizer() - - // The input stream. - p.input = nil - // The error handling strategy for the parser. The default value is a new - // instance of {@link DefaultErrorStrategy}. - p.errHandler = NewDefaultErrorStrategy() - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - // The {@link ParserRuleContext} object for the currently executing rule. - // p.is always non-nil during the parsing process. - p.ctx = nil - // Specifies whether or not the parser should construct a parse tree during - // the parsing process. The default value is {@code true}. - p.BuildParseTrees = true - // When {@link //setTrace}{@code (true)} is called, a reference to the - // {@link TraceListener} is stored here so it can be easily removed in a - // later call to {@link //setTrace}{@code (false)}. The listener itself is - // implemented as a parser listener so p.field is not directly used by - // other parser methods. - p.tracer = nil - // The list of {@link ParseTreeListener} listeners registered to receive - // events during the parse. - p.parseListeners = nil - // The number of syntax errors Reported during parsing. 
p.value is - // incremented each time {@link //NotifyErrorListeners} is called. - p._SyntaxErrors = 0 - p.SetInputStream(input) - - return p -} - -// p.field maps from the serialized ATN string to the deserialized {@link -// ATN} with -// bypass alternatives. -// -// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() -var bypassAltsAtnCache = make(map[string]int) - -// reset the parser's state// -func (p *BaseParser) reset() { - if p.input != nil { - p.input.Seek(0) - } - p.errHandler.reset(p) - p.ctx = nil - p._SyntaxErrors = 0 - p.SetTrace(nil) - p.precedenceStack = make([]int, 0) - p.precedenceStack.Push(0) - if p.Interpreter != nil { - p.Interpreter.reset() - } -} - -func (p *BaseParser) GetErrorHandler() ErrorStrategy { - return p.errHandler -} - -func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { - p.errHandler = e -} - -// Match current input symbol against {@code ttype}. If the symbol type -// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are -// called to complete the Match process. -// -//

If the symbol type does not Match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

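A schematic of the match-or-recover flow just described, with invented types and callbacks standing in for the error strategy. The real parser calls ReportMatch, Consume, and RecoverInline on its ErrorStrategy, and only attaches an error node when the recovered token's index is -1, i.e. when it was conjured rather than read from input:

```go
package main

import "fmt"

type tok struct {
	typ   int
	index int
}

// match returns the current token if it has the wanted type, otherwise
// asks the recovery callback for a substitute and records a conjured
// token as an error node, mirroring the shape of BaseParser.Match.
func match(want int, current, recoverInline func() tok, addErrorNode func(tok)) tok {
	t := current()
	if t.typ == want {
		return t // ReportMatch + Consume in the real flow
	}
	t = recoverInline() // e.g. single-token insertion
	if t.index == -1 {
		addErrorNode(t) // conjured token, not read from the stream
	}
	return t
}

func main() {
	conjured := tok{typ: 7, index: -1}
	got := match(7,
		func() tok { return tok{typ: 3, index: 0} },
		func() tok { return conjured },
		func(t tok) { fmt.Println("error node for type", t.typ) })
	fmt.Println("matched type", got.typ)
}
```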
-// -// @param ttype the token type to Match -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// {@code ttype} and the error strategy could not recover from the -// mismatched symbol - -func (p *BaseParser) Match(ttype int) Token { - - t := p.GetCurrentToken() - - if t.GetTokenType() == ttype { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - - return t -} - -// Match current input symbol as a wildcard. If the symbol type Matches -// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} -// and {@link //consume} are called to complete the Match process. -// -//

If the symbol type does not Match, -// {@link ANTLRErrorStrategy//recoverInline} is called on the current error -// strategy to attempt recovery. If {@link //getBuildParseTree} is -// {@code true} and the token index of the symbol returned by -// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to -// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

-// -// @return the Matched symbol -// @panics RecognitionException if the current input symbol did not Match -// a wildcard and the error strategy could not recover from the mismatched -// symbol - -func (p *BaseParser) MatchWildcard() Token { - t := p.GetCurrentToken() - if t.GetTokenType() > 0 { - p.errHandler.ReportMatch(p) - p.Consume() - } else { - t = p.errHandler.RecoverInline(p) - if p.BuildParseTrees && t.GetTokenIndex() == -1 { - // we must have conjured up a Newtoken during single token - // insertion - // if it's not the current symbol - p.ctx.AddErrorNode(t) - } - } - return t -} - -func (p *BaseParser) GetParserRuleContext() ParserRuleContext { - return p.ctx -} - -func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { - p.ctx = v -} - -func (p *BaseParser) GetParseListeners() []ParseTreeListener { - if p.parseListeners == nil { - return make([]ParseTreeListener, 0) - } - return p.parseListeners -} - -// Registers {@code listener} to receive events during the parsing process. -// -//

To support output-preserving grammar transformations (including but not
-// limited to left-recursion removal, automated left-factoring, and
-// optimized code generation), calls to listener methods during the parse
-// may differ substantially from calls made by
-// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
-// particular, rule entry and exit events may occur in a different order
-// during the parse than after the parse. In addition, calls to certain
-// rule entry methods may be omitted.

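One consequence of the ordering caveats above is visible in TriggerExitRuleEvent further down: exit events deliberately fire in reverse registration order, so listener nesting mirrors rule nesting. A three-line illustration of that reverse walk:

```go
package main

import "fmt"

func main() {
	// The last listener registered is the first to see the exit event,
	// matching the index arithmetic used by TriggerExitRuleEvent.
	listeners := []string{"first", "second", "third"}
	l := len(listeners) - 1
	for i := range listeners {
		fmt.Println("exit ->", listeners[l-i]) // third, second, first
	}
}
```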
-// -//

With the following specific exceptions, calls to listener events are -// deterministic, i.e. for identical input the calls to listener -// methods will be the same.

-// -//
    -//
-//   - Alterations to the grammar used to generate code may change the
-//     behavior of the listener calls.
-//   - Alterations to the command line options passed to ANTLR 4 when
-//     generating the parser may change the behavior of the listener calls.
-//   - Changing the version of the ANTLR Tool used to generate the parser
-//     may change the behavior of the listener calls.
-// -// @param listener the listener to add -// -// @panics nilPointerException if {@code} listener is {@code nil} -func (p *BaseParser) AddParseListener(listener ParseTreeListener) { - if listener == nil { - panic("listener") - } - if p.parseListeners == nil { - p.parseListeners = make([]ParseTreeListener, 0) - } - p.parseListeners = append(p.parseListeners, listener) -} - -// Remove {@code listener} from the list of parse listeners. -// -//

If {@code listener} is {@code nil} or has not been added as a parse
-// listener, this method does nothing.

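RemoveParseListener, defined just below, drops one element from the listener slice with the standard Go splice idiom. A standalone sketch of that idiom; the listener names are invented:

```go
package main

import "fmt"

func main() {
	listeners := []string{"tracer", "profiler", "printer"}

	// Find the index of the listener to remove, as the method does.
	idx := -1
	for i, v := range listeners {
		if v == "profiler" {
			idx = i
			break
		}
	}

	// Splice it out. This reuses the backing array, which is fine here
	// because the parser owns the slice exclusively.
	if idx != -1 {
		listeners = append(listeners[:idx], listeners[idx+1:]...)
	}
	fmt.Println(listeners) // [tracer printer]
}
```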
-// @param listener the listener to remove -func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { - - if p.parseListeners != nil { - - idx := -1 - for i, v := range p.parseListeners { - if v == listener { - idx = i - break - } - } - - if idx == -1 { - return - } - - // remove the listener from the slice - p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) - - if len(p.parseListeners) == 0 { - p.parseListeners = nil - } - } -} - -// Remove all parse listeners. -func (p *BaseParser) removeParseListeners() { - p.parseListeners = nil -} - -// Notify any parse listeners of an enter rule event. -func (p *BaseParser) TriggerEnterRuleEvent() { - if p.parseListeners != nil { - ctx := p.ctx - for _, listener := range p.parseListeners { - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) - } - } -} - -// Notify any parse listeners of an exit rule event. -// -// @see //addParseListener -func (p *BaseParser) TriggerExitRuleEvent() { - if p.parseListeners != nil { - // reverse order walk of listeners - ctx := p.ctx - l := len(p.parseListeners) - 1 - - for i := range p.parseListeners { - listener := p.parseListeners[l-i] - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) - } - } -} - -func (p *BaseParser) GetInterpreter() *ParserATNSimulator { - return p.Interpreter -} - -func (p *BaseParser) GetATN() *ATN { - return p.Interpreter.atn -} - -func (p *BaseParser) GetTokenFactory() TokenFactory { - return p.input.GetTokenSource().GetTokenFactory() -} - -// Tell our token source and error strategy about a Newway to create tokens.// -func (p *BaseParser) setTokenFactory(factory TokenFactory) { - p.input.GetTokenSource().setTokenFactory(factory) -} - -// The ATN with bypass alternatives is expensive to create so we create it -// lazily. -// -// @panics UnsupportedOperationException if the current parser does not -// implement the {@link //getSerializedATN()} method. -func (p *BaseParser) GetATNWithBypassAlts() { - - // TODO - panic("Not implemented!") - - // serializedAtn := p.getSerializedATN() - // if (serializedAtn == nil) { - // panic("The current parser does not support an ATN with bypass alternatives.") - // } - // result := p.bypassAltsAtnCache[serializedAtn] - // if (result == nil) { - // deserializationOptions := NewATNDeserializationOptions(nil) - // deserializationOptions.generateRuleBypassTransitions = true - // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) - // p.bypassAltsAtnCache[serializedAtn] = result - // } - // return result -} - -// The preferred method of getting a tree pattern. For example, here's a -// sample use: -// -//
-// ParseTree t = parser.expr()
-// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
-// MyParser.RULE_expr)
-// ParseTreeMatch m = p.Match(t)
-// String id = m.Get("ID")
-// 
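The sample above reflects the Java API shape; in this Go runtime, compileParseTreePattern simply panics as unimplemented (see the function body below). Purely to make the compile-then-match idea concrete, here is a toy matcher over flat token strings with a simplified, space-separated pattern syntax; none of these names exist in the runtime:

```go
package main

import (
	"fmt"
	"strings"
)

// compile splits a toy pattern like "<ID> + 0" into elements: tags in
// angle brackets capture a token, anything else must match literally.
func compile(pattern string) []string { return strings.Split(pattern, " ") }

func match(pat, input []string) (map[string]string, bool) {
	if len(pat) != len(input) {
		return nil, false
	}
	labels := map[string]string{}
	for i, p := range pat {
		if strings.HasPrefix(p, "<") {
			labels[strings.Trim(p, "<>")] = input[i] // capture under the tag name
		} else if p != input[i] {
			return nil, false
		}
	}
	return labels, true
}

func main() {
	pat := compile("<ID> + 0")
	m, ok := match(pat, []string{"x", "+", "0"})
	fmt.Println(ok, m["ID"]) // true x
}
```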
- -func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { - - panic("NewParseTreePatternMatcher not implemented!") - // - // if (lexer == nil) { - // if (p.GetTokenStream() != nil) { - // tokenSource := p.GetTokenStream().GetTokenSource() - // if _, ok := tokenSource.(ILexer); ok { - // lexer = tokenSource - // } - // } - // } - // if (lexer == nil) { - // panic("Parser can't discover a lexer to use") - // } - - // m := NewParseTreePatternMatcher(lexer, p) - // return m.compile(pattern, patternRuleIndex) -} - -func (p *BaseParser) GetInputStream() IntStream { - return p.GetTokenStream() -} - -func (p *BaseParser) SetInputStream(input TokenStream) { - p.SetTokenStream(input) -} - -func (p *BaseParser) GetTokenStream() TokenStream { - return p.input -} - -// Set the token stream and reset the parser.// -func (p *BaseParser) SetTokenStream(input TokenStream) { - p.input = nil - p.reset() - p.input = input -} - -// Match needs to return the current input symbol, which gets put -// into the label for the associated token ref e.g., x=ID. -func (p *BaseParser) GetCurrentToken() Token { - return p.input.LT(1) -} - -func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { - if offendingToken == nil { - offendingToken = p.GetCurrentToken() - } - p._SyntaxErrors++ - line := offendingToken.GetLine() - column := offendingToken.GetColumn() - listener := p.GetErrorListenerDispatch() - listener.SyntaxError(p, offendingToken, line, column, msg, err) -} - -func (p *BaseParser) Consume() Token { - o := p.GetCurrentToken() - if o.GetTokenType() != TokenEOF { - p.GetInputStream().Consume() - } - hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 - if p.BuildParseTrees || hasListener { - if p.errHandler.InErrorRecoveryMode(p) { - node := p.ctx.AddErrorNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitErrorNode(node) - } - } - - } else { - node := p.ctx.AddTokenNode(o) - if p.parseListeners != nil { - for _, l := range p.parseListeners { - l.VisitTerminal(node) - } - } - } - // node.invokingState = p.state - } - - return o -} - -func (p *BaseParser) addContextToParseTree() { - // add current context to parent if we have a parent - if p.ctx.GetParent() != nil { - p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) - } -} - -func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { - p.SetState(state) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.BuildParseTrees { - p.addContextToParseTree() - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() - } -} - -func (p *BaseParser) ExitRule() { - p.ctx.SetStop(p.input.LT(-1)) - // trigger event on ctx, before it reverts to parent - if p.parseListeners != nil { - p.TriggerExitRuleEvent() - } - p.SetState(p.ctx.GetInvokingState()) - if p.ctx.GetParent() != nil { - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } else { - p.ctx = nil - } -} - -func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { - localctx.SetAltNumber(altNum) - // if we have Newlocalctx, make sure we replace existing ctx - // that is previous child of parse tree - if p.BuildParseTrees && p.ctx != localctx { - if p.ctx.GetParent() != nil { - p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() - p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) - } - } - p.ctx = localctx -} - -// Get the precedence level for the top-most precedence rule. 
-// -// @return The precedence level for the top-most precedence rule, or -1 if -// the parser context is not nested within a precedence rule. - -func (p *BaseParser) GetPrecedence() int { - if len(p.precedenceStack) == 0 { - return -1 - } - - return p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { - p.SetState(state) - p.precedenceStack.Push(precedence) - p.ctx = localctx - p.ctx.SetStart(p.input.LT(1)) - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -// -// Like {@link //EnterRule} but for recursive rules. - -func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { - previous := p.ctx - previous.SetParent(localctx) - previous.SetInvokingState(state) - previous.SetStop(p.input.LT(-1)) - - p.ctx = localctx - p.ctx.SetStart(previous.GetStart()) - if p.BuildParseTrees { - p.ctx.AddChild(previous) - } - if p.parseListeners != nil { - p.TriggerEnterRuleEvent() // simulates rule entry for - // left-recursive rules - } -} - -func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { - p.precedenceStack.Pop() - p.ctx.SetStop(p.input.LT(-1)) - retCtx := p.ctx // save current ctx (return value) - // unroll so ctx is as it was before call to recursive method - if p.parseListeners != nil { - for p.ctx != parentCtx { - p.TriggerExitRuleEvent() - p.ctx = p.ctx.GetParent().(ParserRuleContext) - } - } else { - p.ctx = parentCtx - } - // hook into tree - retCtx.SetParent(parentCtx) - if p.BuildParseTrees && parentCtx != nil { - // add return ctx into invoking rule's tree - parentCtx.AddChild(retCtx) - } -} - -func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { - ctx := p.ctx - for ctx != nil { - if ctx.GetRuleIndex() == ruleIndex { - return ctx - } - ctx = ctx.GetParent().(ParserRuleContext) - } - return nil -} - -func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { - return precedence >= p.precedenceStack[len(p.precedenceStack)-1] -} - -func (p *BaseParser) inContext(context ParserRuleContext) bool { - // TODO: useful in parser? - return false -} - -// -// Checks whether or not {@code symbol} can follow the current state in the -// ATN. The behavior of p.method is equivalent to the following, but is -// implemented such that the complete context-sensitive follow set does not -// need to be explicitly constructed. -// -//
-// return getExpectedTokens().contains(symbol)
-// 
-// -// @param symbol the symbol type to check -// @return {@code true} if {@code symbol} can follow the current state in -// the ATN, otherwise {@code false}. - -func (p *BaseParser) IsExpectedToken(symbol int) bool { - atn := p.Interpreter.atn - ctx := p.ctx - s := atn.states[p.state] - following := atn.NextTokens(s, nil) - if following.contains(symbol) { - return true - } - if !following.contains(TokenEpsilon) { - return false - } - for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { - invokingState := atn.states[ctx.GetInvokingState()] - rt := invokingState.GetTransitions()[0] - following = atn.NextTokens(rt.(*RuleTransition).followState, nil) - if following.contains(symbol) { - return true - } - ctx = ctx.GetParent().(ParserRuleContext) - } - if following.contains(TokenEpsilon) && symbol == TokenEOF { - return true - } - - return false -} - -// Computes the set of input symbols which could follow the current parser -// state and context, as given by {@link //GetState} and {@link //GetContext}, -// respectively. -// -// @see ATN//getExpectedTokens(int, RuleContext) -func (p *BaseParser) GetExpectedTokens() *IntervalSet { - return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) -} - -func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { - atn := p.Interpreter.atn - s := atn.states[p.state] - return atn.NextTokens(s, nil) -} - -// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// -func (p *BaseParser) GetRuleIndex(ruleName string) int { - var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] - if ok { - return ruleIndex - } - - return -1 -} - -// Return List<String> of the rule names in your parser instance -// leading up to a call to the current rule. You could override if -// you want more details such as the file/line info of where -// in the ATN a rule is invoked. -// -// this very useful for error messages. - -func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { - if c == nil { - c = p.ctx - } - stack := make([]string, 0) - for c != nil { - // compute what follows who invoked us - ruleIndex := c.GetRuleIndex() - if ruleIndex < 0 { - stack = append(stack, "n/a") - } else { - stack = append(stack, p.GetRuleNames()[ruleIndex]) - } - - vp := c.GetParent() - - if vp == nil { - break - } - - c = vp.(ParserRuleContext) - } - return stack -} - -// For debugging and other purposes.// -func (p *BaseParser) GetDFAStrings() string { - return fmt.Sprint(p.Interpreter.decisionToDFA) -} - -// For debugging and other purposes.// -func (p *BaseParser) DumpDFA() { - seenOne := false - for _, dfa := range p.Interpreter.decisionToDFA { - if dfa.states.Len() > 0 { - if seenOne { - fmt.Println() - } - fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") - fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) - seenOne = true - } - } -} - -func (p *BaseParser) GetSourceName() string { - return p.GrammarFileName -} - -// During a parse is sometimes useful to listen in on the rule entry and exit -// events as well as token Matches. p.is for quick and dirty debugging. -func (p *BaseParser) SetTrace(trace *TraceListener) { - if trace == nil { - p.RemoveParseListener(p.tracer) - p.tracer = nil - } else { - if p.tracer != nil { - p.RemoveParseListener(p.tracer) - } - p.tracer = NewTraceListener(p) - p.AddParseListener(p.tracer) - } -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +type Parser interface { + Recognizer + + GetInterpreter() *ParserATNSimulator + + GetTokenStream() TokenStream + GetTokenFactory() TokenFactory + GetParserRuleContext() ParserRuleContext + SetParserRuleContext(ParserRuleContext) + Consume() Token + GetParseListeners() []ParseTreeListener + + GetErrorHandler() ErrorStrategy + SetErrorHandler(ErrorStrategy) + GetInputStream() IntStream + GetCurrentToken() Token + GetExpectedTokens() *IntervalSet + NotifyErrorListeners(string, Token, RecognitionException) + IsExpectedToken(int) bool + GetPrecedence() int + GetRuleInvocationStack(ParserRuleContext) []string +} + +type BaseParser struct { + *BaseRecognizer + + Interpreter *ParserATNSimulator + BuildParseTrees bool + + input TokenStream + errHandler ErrorStrategy + precedenceStack IntStack + ctx ParserRuleContext + + tracer *TraceListener + parseListeners []ParseTreeListener + _SyntaxErrors int +} + +// p.is all the parsing support code essentially most of it is error +// recovery stuff.// +func NewBaseParser(input TokenStream) *BaseParser { + + p := new(BaseParser) + + p.BaseRecognizer = NewBaseRecognizer() + + // The input stream. + p.input = nil + // The error handling strategy for the parser. The default value is a new + // instance of {@link DefaultErrorStrategy}. + p.errHandler = NewDefaultErrorStrategy() + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + // The {@link ParserRuleContext} object for the currently executing rule. + // p.is always non-nil during the parsing process. + p.ctx = nil + // Specifies whether or not the parser should construct a parse tree during + // the parsing process. The default value is {@code true}. + p.BuildParseTrees = true + // When {@link //setTrace}{@code (true)} is called, a reference to the + // {@link TraceListener} is stored here so it can be easily removed in a + // later call to {@link //setTrace}{@code (false)}. The listener itself is + // implemented as a parser listener so p.field is not directly used by + // other parser methods. + p.tracer = nil + // The list of {@link ParseTreeListener} listeners registered to receive + // events during the parse. + p.parseListeners = nil + // The number of syntax errors Reported during parsing. p.value is + // incremented each time {@link //NotifyErrorListeners} is called. + p._SyntaxErrors = 0 + p.SetInputStream(input) + + return p +} + +// p.field maps from the serialized ATN string to the deserialized {@link +// ATN} with +// bypass alternatives. +// +// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() +var bypassAltsAtnCache = make(map[string]int) + +// reset the parser's state// +func (p *BaseParser) reset() { + if p.input != nil { + p.input.Seek(0) + } + p.errHandler.reset(p) + p.ctx = nil + p._SyntaxErrors = 0 + p.SetTrace(nil) + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + if p.Interpreter != nil { + p.Interpreter.reset() + } +} + +func (p *BaseParser) GetErrorHandler() ErrorStrategy { + return p.errHandler +} + +func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { + p.errHandler = e +} + +// Match current input symbol against {@code ttype}. If the symbol type +// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are +// called to complete the Match process. +// +//

If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

+// +// @param ttype the token type to Match +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// {@code ttype} and the error strategy could not recover from the +// mismatched symbol + +func (p *BaseParser) Match(ttype int) Token { + + t := p.GetCurrentToken() + + if t.GetTokenType() == ttype { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + + return t +} + +// Match current input symbol as a wildcard. If the symbol type Matches +// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} +// and {@link //consume} are called to complete the Match process. +// +//

If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.

+// +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// a wildcard and the error strategy could not recover from the mismatched +// symbol + +func (p *BaseParser) MatchWildcard() Token { + t := p.GetCurrentToken() + if t.GetTokenType() > 0 { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + return t +} + +func (p *BaseParser) GetParserRuleContext() ParserRuleContext { + return p.ctx +} + +func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { + p.ctx = v +} + +func (p *BaseParser) GetParseListeners() []ParseTreeListener { + if p.parseListeners == nil { + return make([]ParseTreeListener, 0) + } + return p.parseListeners +} + +// Registers {@code listener} to receive events during the parsing process. +// +//

To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parse. In addition, calls to certain
+// rule entry methods may be omitted.

+// +//

With the following specific exceptions, calls to listener events are +// deterministic, i.e. for identical input the calls to listener +// methods will be the same.

+// +//
    +//
+//   - Alterations to the grammar used to generate code may change the
+//     behavior of the listener calls.
+//   - Alterations to the command line options passed to ANTLR 4 when
+//     generating the parser may change the behavior of the listener calls.
+//   - Changing the version of the ANTLR Tool used to generate the parser
+//     may change the behavior of the listener calls.
+// +// @param listener the listener to add +// +// @panics nilPointerException if {@code} listener is {@code nil} +func (p *BaseParser) AddParseListener(listener ParseTreeListener) { + if listener == nil { + panic("listener") + } + if p.parseListeners == nil { + p.parseListeners = make([]ParseTreeListener, 0) + } + p.parseListeners = append(p.parseListeners, listener) +} + +// Remove {@code listener} from the list of parse listeners. +// +//

If {@code listener} is {@code nil} or has not been added as a parse
+// listener, this method does nothing.

+// @param listener the listener to remove +func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { + + if p.parseListeners != nil { + + idx := -1 + for i, v := range p.parseListeners { + if v == listener { + idx = i + break + } + } + + if idx == -1 { + return + } + + // remove the listener from the slice + p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) + + if len(p.parseListeners) == 0 { + p.parseListeners = nil + } + } +} + +// Remove all parse listeners. +func (p *BaseParser) removeParseListeners() { + p.parseListeners = nil +} + +// Notify any parse listeners of an enter rule event. +func (p *BaseParser) TriggerEnterRuleEvent() { + if p.parseListeners != nil { + ctx := p.ctx + for _, listener := range p.parseListeners { + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) + } + } +} + +// Notify any parse listeners of an exit rule event. +// +// @see //addParseListener +func (p *BaseParser) TriggerExitRuleEvent() { + if p.parseListeners != nil { + // reverse order walk of listeners + ctx := p.ctx + l := len(p.parseListeners) - 1 + + for i := range p.parseListeners { + listener := p.parseListeners[l-i] + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) + } + } +} + +func (p *BaseParser) GetInterpreter() *ParserATNSimulator { + return p.Interpreter +} + +func (p *BaseParser) GetATN() *ATN { + return p.Interpreter.atn +} + +func (p *BaseParser) GetTokenFactory() TokenFactory { + return p.input.GetTokenSource().GetTokenFactory() +} + +// Tell our token source and error strategy about a Newway to create tokens.// +func (p *BaseParser) setTokenFactory(factory TokenFactory) { + p.input.GetTokenSource().setTokenFactory(factory) +} + +// The ATN with bypass alternatives is expensive to create so we create it +// lazily. +// +// @panics UnsupportedOperationException if the current parser does not +// implement the {@link //getSerializedATN()} method. +func (p *BaseParser) GetATNWithBypassAlts() { + + // TODO + panic("Not implemented!") + + // serializedAtn := p.getSerializedATN() + // if (serializedAtn == nil) { + // panic("The current parser does not support an ATN with bypass alternatives.") + // } + // result := p.bypassAltsAtnCache[serializedAtn] + // if (result == nil) { + // deserializationOptions := NewATNDeserializationOptions(nil) + // deserializationOptions.generateRuleBypassTransitions = true + // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) + // p.bypassAltsAtnCache[serializedAtn] = result + // } + // return result +} + +// The preferred method of getting a tree pattern. For example, here's a +// sample use: +// +//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+// 
+ +func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { + + panic("NewParseTreePatternMatcher not implemented!") + // + // if (lexer == nil) { + // if (p.GetTokenStream() != nil) { + // tokenSource := p.GetTokenStream().GetTokenSource() + // if _, ok := tokenSource.(ILexer); ok { + // lexer = tokenSource + // } + // } + // } + // if (lexer == nil) { + // panic("Parser can't discover a lexer to use") + // } + + // m := NewParseTreePatternMatcher(lexer, p) + // return m.compile(pattern, patternRuleIndex) +} + +func (p *BaseParser) GetInputStream() IntStream { + return p.GetTokenStream() +} + +func (p *BaseParser) SetInputStream(input TokenStream) { + p.SetTokenStream(input) +} + +func (p *BaseParser) GetTokenStream() TokenStream { + return p.input +} + +// Set the token stream and reset the parser.// +func (p *BaseParser) SetTokenStream(input TokenStream) { + p.input = nil + p.reset() + p.input = input +} + +// Match needs to return the current input symbol, which gets put +// into the label for the associated token ref e.g., x=ID. +func (p *BaseParser) GetCurrentToken() Token { + return p.input.LT(1) +} + +func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { + if offendingToken == nil { + offendingToken = p.GetCurrentToken() + } + p._SyntaxErrors++ + line := offendingToken.GetLine() + column := offendingToken.GetColumn() + listener := p.GetErrorListenerDispatch() + listener.SyntaxError(p, offendingToken, line, column, msg, err) +} + +func (p *BaseParser) Consume() Token { + o := p.GetCurrentToken() + if o.GetTokenType() != TokenEOF { + p.GetInputStream().Consume() + } + hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 + if p.BuildParseTrees || hasListener { + if p.errHandler.InErrorRecoveryMode(p) { + node := p.ctx.AddErrorNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitErrorNode(node) + } + } + + } else { + node := p.ctx.AddTokenNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitTerminal(node) + } + } + } + // node.invokingState = p.state + } + + return o +} + +func (p *BaseParser) addContextToParseTree() { + // add current context to parent if we have a parent + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) + } +} + +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { + p.SetState(state) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.BuildParseTrees { + p.addContextToParseTree() + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() + } +} + +func (p *BaseParser) ExitRule() { + p.ctx.SetStop(p.input.LT(-1)) + // trigger event on ctx, before it reverts to parent + if p.parseListeners != nil { + p.TriggerExitRuleEvent() + } + p.SetState(p.ctx.GetInvokingState()) + if p.ctx.GetParent() != nil { + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } else { + p.ctx = nil + } +} + +func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { + localctx.SetAltNumber(altNum) + // if we have Newlocalctx, make sure we replace existing ctx + // that is previous child of parse tree + if p.BuildParseTrees && p.ctx != localctx { + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() + p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) + } + } + p.ctx = localctx +} + +// Get the precedence level for the top-most precedence rule. 
+// +// @return The precedence level for the top-most precedence rule, or -1 if +// the parser context is not nested within a precedence rule. + +func (p *BaseParser) GetPrecedence() int { + if len(p.precedenceStack) == 0 { + return -1 + } + + return p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { + p.SetState(state) + p.precedenceStack.Push(precedence) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +// +// Like {@link //EnterRule} but for recursive rules. + +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { + previous := p.ctx + previous.SetParent(localctx) + previous.SetInvokingState(state) + previous.SetStop(p.input.LT(-1)) + + p.ctx = localctx + p.ctx.SetStart(previous.GetStart()) + if p.BuildParseTrees { + p.ctx.AddChild(previous) + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { + p.precedenceStack.Pop() + p.ctx.SetStop(p.input.LT(-1)) + retCtx := p.ctx // save current ctx (return value) + // unroll so ctx is as it was before call to recursive method + if p.parseListeners != nil { + for p.ctx != parentCtx { + p.TriggerExitRuleEvent() + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } + } else { + p.ctx = parentCtx + } + // hook into tree + retCtx.SetParent(parentCtx) + if p.BuildParseTrees && parentCtx != nil { + // add return ctx into invoking rule's tree + parentCtx.AddChild(retCtx) + } +} + +func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { + ctx := p.ctx + for ctx != nil { + if ctx.GetRuleIndex() == ruleIndex { + return ctx + } + ctx = ctx.GetParent().(ParserRuleContext) + } + return nil +} + +func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { + return precedence >= p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) inContext(context ParserRuleContext) bool { + // TODO: useful in parser? + return false +} + +// +// Checks whether or not {@code symbol} can follow the current state in the +// ATN. The behavior of p.method is equivalent to the following, but is +// implemented such that the complete context-sensitive follow set does not +// need to be explicitly constructed. +// +//
+// return getExpectedTokens().contains(symbol)
+// 
+// +// @param symbol the symbol type to check +// @return {@code true} if {@code symbol} can follow the current state in +// the ATN, otherwise {@code false}. + +func (p *BaseParser) IsExpectedToken(symbol int) bool { + atn := p.Interpreter.atn + ctx := p.ctx + s := atn.states[p.state] + following := atn.NextTokens(s, nil) + if following.contains(symbol) { + return true + } + if !following.contains(TokenEpsilon) { + return false + } + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + following = atn.NextTokens(rt.(*RuleTransition).followState, nil) + if following.contains(symbol) { + return true + } + ctx = ctx.GetParent().(ParserRuleContext) + } + if following.contains(TokenEpsilon) && symbol == TokenEOF { + return true + } + + return false +} + +// Computes the set of input symbols which could follow the current parser +// state and context, as given by {@link //GetState} and {@link //GetContext}, +// respectively. +// +// @see ATN//getExpectedTokens(int, RuleContext) +func (p *BaseParser) GetExpectedTokens() *IntervalSet { + return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) +} + +func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { + atn := p.Interpreter.atn + s := atn.states[p.state] + return atn.NextTokens(s, nil) +} + +// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// +func (p *BaseParser) GetRuleIndex(ruleName string) int { + var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] + if ok { + return ruleIndex + } + + return -1 +} + +// Return List<String> of the rule names in your parser instance +// leading up to a call to the current rule. You could override if +// you want more details such as the file/line info of where +// in the ATN a rule is invoked. +// +// this very useful for error messages. + +func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { + if c == nil { + c = p.ctx + } + stack := make([]string, 0) + for c != nil { + // compute what follows who invoked us + ruleIndex := c.GetRuleIndex() + if ruleIndex < 0 { + stack = append(stack, "n/a") + } else { + stack = append(stack, p.GetRuleNames()[ruleIndex]) + } + + vp := c.GetParent() + + if vp == nil { + break + } + + c = vp.(ParserRuleContext) + } + return stack +} + +// For debugging and other purposes.// +func (p *BaseParser) GetDFAStrings() string { + return fmt.Sprint(p.Interpreter.decisionToDFA) +} + +// For debugging and other purposes.// +func (p *BaseParser) DumpDFA() { + seenOne := false + for _, dfa := range p.Interpreter.decisionToDFA { + if dfa.states.Len() > 0 { + if seenOne { + fmt.Println() + } + fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") + fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) + seenOne = true + } + } +} + +func (p *BaseParser) GetSourceName() string { + return p.GrammarFileName +} + +// During a parse is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. p.is for quick and dirty debugging. 
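A reduced model of the SetTrace toggle implemented just below: passing nil unregisters the stored tracer, anything else swaps in a fresh one, so at most one trace listener is ever installed. The types here are stand-ins, not the runtime's:

```go
package main

import "fmt"

type tracer struct{ id int }

type parser struct {
	listeners []*tracer
	trace     *tracer
	nextID    int
}

func (p *parser) remove(t *tracer) {
	for i, v := range p.listeners {
		if v == t {
			p.listeners = append(p.listeners[:i], p.listeners[i+1:]...)
			return
		}
	}
}

// setTrace mirrors SetTrace's shape: nil-like input removes the tracer,
// otherwise the old tracer (if any) is replaced with a new one.
func (p *parser) setTrace(on bool) {
	if !on {
		p.remove(p.trace)
		p.trace = nil
		return
	}
	if p.trace != nil {
		p.remove(p.trace)
	}
	p.nextID++
	p.trace = &tracer{id: p.nextID}
	p.listeners = append(p.listeners, p.trace)
}

func main() {
	p := &parser{}
	p.setTrace(true)
	p.setTrace(true)              // replaces, never duplicates
	fmt.Println(len(p.listeners)) // 1
	p.setTrace(false)
	fmt.Println(len(p.listeners)) // 0
}
```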
+func (p *BaseParser) SetTrace(trace *TraceListener) { + if trace == nil { + p.RemoveParseListener(p.tracer) + p.tracer = nil + } else { + if p.tracer != nil { + p.RemoveParseListener(p.tracer) + } + p.tracer = NewTraceListener(p) + p.AddParseListener(p.tracer) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go index 8bcc46a0d9..95a3bb000c 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_atn_simulator.go @@ -1,1559 +1,1559 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -var ( - ParserATNSimulatorDebug = false - ParserATNSimulatorTraceATNSim = false - ParserATNSimulatorDFADebug = false - ParserATNSimulatorRetryDebug = false - TurnOffLRLoopEntryBranchOpt = false -) - -type ParserATNSimulator struct { - *BaseATNSimulator - - parser Parser - predictionMode int - input TokenStream - startIndex int - dfa *DFA - mergeCache *DoubleDict - outerContext ParserRuleContext -} - -func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { - - p := new(ParserATNSimulator) - - p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) - - p.parser = parser - p.decisionToDFA = decisionToDFA - // SLL, LL, or LL + exact ambig detection?// - p.predictionMode = PredictionModeLL - // LAME globals to avoid parameters!!!!! I need these down deep in predTransition - p.input = nil - p.startIndex = 0 - p.outerContext = nil - p.dfa = nil - // Each prediction operation uses a cache for merge of prediction contexts. - // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap - // isn't Synchronized but we're ok since two threads shouldn't reuse same - // parser/atnsim object because it can only handle one input at a time. - // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid - // the merge if we ever see a and b again. Note that (b,a)&rarrc should - // also be examined during cache lookup. - // - p.mergeCache = nil - - return p -} - -func (p *ParserATNSimulator) GetPredictionMode() int { - return p.predictionMode -} - -func (p *ParserATNSimulator) SetPredictionMode(v int) { - p.predictionMode = v -} - -func (p *ParserATNSimulator) reset() { -} - -func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + - strconv.Itoa(input.LT(1).GetColumn())) - } - - p.input = input - p.startIndex = input.Index() - p.outerContext = outerContext - - dfa := p.decisionToDFA[decision] - p.dfa = dfa - m := input.Mark() - index := input.Index() - - defer func() { - p.dfa = nil - p.mergeCache = nil // wack cache after each prediction - input.Seek(index) - input.Release(m) - }() - - // Now we are certain to have a specific decision's DFA - // But, do we still need an initial state? 
- var s0 *DFAState - p.atn.stateMu.RLock() - if dfa.getPrecedenceDfa() { - p.atn.edgeMu.RLock() - // the start state for a precedence DFA depends on the current - // parser precedence, and is provided by a DFA method. - s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) - p.atn.edgeMu.RUnlock() - } else { - // the start state for a "regular" DFA is just s0 - s0 = dfa.getS0() - } - p.atn.stateMu.RUnlock() - - if s0 == nil { - if outerContext == nil { - outerContext = ParserRuleContextEmpty - } - if ParserATNSimulatorDebug { - fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + - " exec LA(1)==" + p.getLookaheadName(input) + - ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) - } - fullCtx := false - s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) - - p.atn.stateMu.Lock() - if dfa.getPrecedenceDfa() { - // If p is a precedence DFA, we use applyPrecedenceFilter - // to convert the computed start state to a precedence start - // state. We then use DFA.setPrecedenceStartState to set the - // appropriate start state for the precedence level rather - // than simply setting DFA.s0. - // - dfa.s0.configs = s0Closure - s0Closure = p.applyPrecedenceFilter(s0Closure) - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - p.atn.edgeMu.Lock() - dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) - p.atn.edgeMu.Unlock() - } else { - s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) - dfa.setS0(s0) - } - p.atn.stateMu.Unlock() - } - - alt := p.execATN(dfa, s0, input, index, outerContext) - if ParserATNSimulatorDebug { - fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) - } - return alt - -} - -// Performs ATN simulation to compute a predicted alternative based -// upon the remaining input, but also updates the DFA cache to avoid -// having to traverse the ATN again for the same input sequence. - -// There are some key conditions we're looking for after computing a new -// set of ATN configs (proposed DFA state): -// if the set is empty, there is no viable alternative for current symbol -// does the state uniquely predict an alternative? -// does the state have a conflict that would prevent us from -// putting it on the work list? - -// We also have some key operations to do: -// add an edge from previous DFA state to potentially NewDFA state, D, -// upon current symbol but only if adding to work list, which means in all -// cases except no viable alternative (and possibly non-greedy decisions?) 
-// collecting predicates and adding semantic context to DFA accept states -// adding rule context to context-sensitive DFA accept states -// consuming an input symbol -// Reporting a conflict -// Reporting an ambiguity -// Reporting a context sensitivity -// Reporting insufficient predicates - -// cover these cases: -// -// dead end -// single alt -// single alt + preds -// conflict -// conflict + preds -func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + - ", DFA state " + s0.String() + - ", LA(1)==" + p.getLookaheadName(input) + - " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) - } - - previousD := s0 - - if ParserATNSimulatorDebug { - fmt.Println("s0 = " + s0.String()) - } - t := input.LA(1) - for { // for more work - D := p.getExistingTargetState(previousD, t) - if D == nil { - D = p.computeTargetState(dfa, previousD, t) - } - if D == ATNSimulatorError { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for SLL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - if D.requiresFullContext && p.predictionMode != PredictionModeSLL { - // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) - conflictingAlts := D.configs.GetConflictingAlts() - if D.predicates != nil { - if ParserATNSimulatorDebug { - fmt.Println("DFA state has preds in DFA sim LL failover") - } - conflictIndex := input.Index() - if conflictIndex != startIndex { - input.Seek(startIndex) - } - conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) - if conflictingAlts.length() == 1 { - if ParserATNSimulatorDebug { - fmt.Println("Full LL avoided") - } - return conflictingAlts.minValue() - } - if conflictIndex != startIndex { - // restore the index so Reporting the fallback to full - // context occurs with the index at the correct spot - input.Seek(conflictIndex) - } - } - if ParserATNSimulatorDFADebug { - fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) - } - fullCtx := true - s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) - p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) - alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) - return alt - } - if D.isAcceptState { - if D.predicates == nil { - return D.prediction - } - stopIndex := input.Index() - input.Seek(startIndex) - alts := p.evalSemanticContext(D.predicates, outerContext, true) - - switch alts.length() { - case 0: - panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) - case 1: - return alts.minValue() - default: - // Report ambiguity after predicate evaluation to make sure the correct set of 
ambig alts is Reported. - p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) - return alts.minValue() - } - } - previousD = D - - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } -} - -// Get an existing target state for an edge in the DFA. If the target state -// for the edge has not yet been computed or is otherwise not available, -// p method returns {@code nil}. -// -// @param previousD The current DFA state -// @param t The next input symbol -// @return The existing target DFA state for the given input symbol -// {@code t}, or {@code nil} if the target state for p edge is not -// already cached - -func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { - if t+1 < 0 { - return nil - } - - p.atn.edgeMu.RLock() - defer p.atn.edgeMu.RUnlock() - edges := previousD.getEdges() - if edges == nil || t+1 >= len(edges) { - return nil - } - return previousD.getIthEdge(t + 1) -} - -// Compute a target state for an edge in the DFA, and attempt to add the -// computed state and corresponding edge to the DFA. -// -// @param dfa The DFA -// @param previousD The current DFA state -// @param t The next input symbol -// -// @return The computed target DFA state for the given input symbol -// {@code t}. If {@code t} does not lead to a valid DFA state, p method -// returns {@link //ERROR}. - -func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { - reach := p.computeReachSet(previousD.configs, t, false) - - if reach == nil { - p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) - return ATNSimulatorError - } - // create Newtarget state we'll add to DFA after it's complete - D := NewDFAState(-1, reach) - - predictedAlt := p.getUniqueAlt(reach) - - if ParserATNSimulatorDebug { - altSubSets := PredictionModegetConflictingAltSubsets(reach) - fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + - ", previous=" + previousD.configs.String() + - ", configs=" + reach.String() + - ", predict=" + strconv.Itoa(predictedAlt) + - ", allSubsetsConflict=" + - fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + - ", conflictingAlts=" + p.getConflictingAlts(reach).String()) - } - if predictedAlt != ATNInvalidAltNumber { - // NO CONFLICT, UNIQUELY PREDICTED ALT - D.isAcceptState = true - D.configs.SetUniqueAlt(predictedAlt) - D.setPrediction(predictedAlt) - } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { - // MORE THAN ONE VIABLE ALTERNATIVE - D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) - D.requiresFullContext = true - // in SLL-only mode, we will stop at p state and return the minimum alt - D.isAcceptState = true - D.setPrediction(D.configs.GetConflictingAlts().minValue()) - } - if D.isAcceptState && D.configs.HasSemanticContext() { - p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) - if D.predicates != nil { - D.setPrediction(ATNInvalidAltNumber) - } - } - // all adds to dfa are done after we've created full D state - D = p.addDFAEdge(dfa, previousD, t, D) - return D -} - -func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { - // We need to test all predicates, even in DFA states that - // uniquely predict alternative. 
- nalts := len(decisionState.GetTransitions()) - // Update DFA so reach becomes accept state with (predicate,alt) - // pairs if preds found for conflicting alts - altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) - altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) - if altToPred != nil { - dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) - dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds - } else { - // There are preds in configs but they might go away - // when OR'd together like {p}? || NONE == NONE. If neither - // alt has preds, resolve to min alt - dfaState.setPrediction(altsToCollectPredsFrom.minValue()) - } -} - -// comes back with reach.uniqueAlt set to a valid alt -func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { - - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("execATNWithFullContext " + s0.String()) - } - - fullCtx := true - foundExactAmbig := false - var reach ATNConfigSet - previous := s0 - input.Seek(startIndex) - t := input.LA(1) - predictedAlt := -1 - - for { // for more work - reach = p.computeReachSet(previous, t, fullCtx) - if reach == nil { - // if any configs in previous dipped into outer context, that - // means that input up to t actually finished entry rule - // at least for LL decision. Full LL doesn't dip into outer - // so don't need special case. - // We will get an error no matter what so delay until after - // decision better error message. Also, no reachable target - // ATN states in SLL implies LL will also get nowhere. - // If conflict in states that dip out, choose min since we - // will get error no matter what. - e := p.noViableAlt(input, outerContext, previous, startIndex) - input.Seek(startIndex) - alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) - if alt != ATNInvalidAltNumber { - return alt - } - - panic(e) - } - altSubSets := PredictionModegetConflictingAltSubsets(reach) - if ParserATNSimulatorDebug { - fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + - strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + - fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) - } - reach.SetUniqueAlt(p.getUniqueAlt(reach)) - // unique prediction? - if reach.GetUniqueAlt() != ATNInvalidAltNumber { - predictedAlt = reach.GetUniqueAlt() - break - } - if p.predictionMode != PredictionModeLLExactAmbigDetection { - predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) - if predictedAlt != ATNInvalidAltNumber { - break - } - } else { - // In exact ambiguity mode, we never try to terminate early. - // Just keeps scarfing until we know what the conflict is - if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { - foundExactAmbig = true - predictedAlt = PredictionModegetSingleViableAlt(altSubSets) - break - } - // else there are multiple non-conflicting subsets or - // we're not sure what the ambiguity is yet. - // So, keep going. - } - previous = reach - if t != TokenEOF { - input.Consume() - t = input.LA(1) - } - } - // If the configuration set uniquely predicts an alternative, - // without conflict, then we know that it's a full LL decision - // not SLL. 
- if reach.GetUniqueAlt() != ATNInvalidAltNumber { - p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) - return predictedAlt - } - // We do not check predicates here because we have checked them - // on-the-fly when doing full context prediction. - - // - // In non-exact ambiguity detection mode, we might actually be able to - // detect an exact ambiguity, but I'm not going to spend the cycles - // needed to check. We only emit ambiguity warnings in exact ambiguity - // mode. - // - // For example, we might know that we have conflicting configurations. - // But, that does not mean that there is no way forward without a - // conflict. It's possible to have nonconflicting alt subsets as in: - - // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] - - // from - // - // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), - // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] - // - // In p case, (17,1,[5 $]) indicates there is some next sequence that - // would resolve p without conflict to alternative 1. Any other viable - // next sequence, however, is associated with a conflict. We stop - // looking for input because no amount of further lookahead will alter - // the fact that we should predict alternative 1. We just can't say for - // sure that there is an ambiguity without looking further. - - p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) - - return predictedAlt -} - -func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { - if p.mergeCache == nil { - p.mergeCache = NewDoubleDict() - } - intermediate := NewBaseATNConfigSet(fullCtx) - - // Configurations already in a rule stop state indicate reaching the end - // of the decision rule (local context) or end of the start rule (full - // context). Once reached, these configurations are never updated by a - // closure operation, so they are handled separately for the performance - // advantage of having a smaller intermediate set when calling closure. - // - // For full-context reach operations, separate handling is required to - // ensure that the alternative Matching the longest overall sequence is - // chosen when multiple such configurations can Match the input. - - var skippedStopStates []*BaseATNConfig - - // First figure out where we can reach on input t - for _, c := range closure.GetItems() { - if ParserATNSimulatorDebug { - fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) - } - - if _, ok := c.GetState().(*RuleStopState); ok { - if fullCtx || t == TokenEOF { - skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig)) - if ParserATNSimulatorDebug { - fmt.Println("added " + c.String() + " to SkippedStopStates") - } - } - continue - } - - for _, trans := range c.GetState().GetTransitions() { - target := p.getReachableTarget(trans, t) - if target != nil { - cfg := NewBaseATNConfig4(c, target) - intermediate.Add(cfg, p.mergeCache) - if ParserATNSimulatorDebug { - fmt.Println("added " + cfg.String() + " to intermediate") - } - } - } - } - - // Now figure out where the reach operation can take us... - var reach ATNConfigSet - - // This block optimizes the reach operation for intermediate sets which - // trivially indicate a termination state for the overall - // AdaptivePredict operation. 
- // - // The conditions assume that intermediate - // contains all configurations relevant to the reach set, but p - // condition is not true when one or more configurations have been - // withheld in SkippedStopStates, or when the current symbol is EOF. - // - if skippedStopStates == nil && t != TokenEOF { - if len(intermediate.configs) == 1 { - // Don't pursue the closure if there is just one state. - // It can only have one alternative just add to result - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { - // Also don't pursue the closure if there is unique alternative - // among the configurations. - reach = intermediate - } - } - // If the reach set could not be trivially determined, perform a closure - // operation on the intermediate set to compute its initial value. - // - if reach == nil { - reach = NewBaseATNConfigSet(fullCtx) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) - treatEOFAsEpsilon := t == TokenEOF - amount := len(intermediate.configs) - for k := 0; k < amount; k++ { - p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) - } - } - if t == TokenEOF { - // After consuming EOF no additional input is possible, so we are - // only interested in configurations which reached the end of the - // decision rule (local context) or end of the start rule (full - // context). Update reach to contain only these configurations. This - // handles both explicit EOF transitions in the grammar and implicit - // EOF transitions following the end of the decision or start rule. - // - // When reach==intermediate, no closure operation was performed. In - // p case, removeAllConfigsNotInRuleStopState needs to check for - // reachable rule stop states as well as configurations already in - // a rule stop state. - // - // This is handled before the configurations in SkippedStopStates, - // because any configurations potentially added from that list are - // already guaranteed to meet p condition whether or not it's - // required. - // - reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) - } - // If SkippedStopStates!=nil, then it contains at least one - // configuration. For full-context reach operations, these - // configurations reached the end of the start rule, in which case we - // only add them back to reach if no configuration during the current - // closure operation reached such a state. This ensures AdaptivePredict - // chooses an alternative Matching the longest overall sequence when - // multiple alternatives are viable. - // - if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { - for l := 0; l < len(skippedStopStates); l++ { - reach.Add(skippedStopStates[l], p.mergeCache) - } - } - - if ParserATNSimulatorTraceATNSim { - fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) - } - - if len(reach.GetItems()) == 0 { - return nil - } - - return reach -} - -// Return a configuration set containing only the configurations from -// {@code configs} which are in a {@link RuleStopState}. If all -// configurations in {@code configs} are already in a rule stop state, p -// method simply returns {@code configs}. -// -//

-// When {@code lookToEndOfRule} is true, this method uses
-// {@link ATN//NextTokens} for each configuration in {@code configs} which is
-// not already in a rule stop state to see if a rule stop state is reachable
-// from the configuration via epsilon-only transitions.
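// Editorial sketch, not part of the vendored runtime: the reachability test
// described above, condensed into a hypothetical in-package helper. It
// mirrors the lookToEndOfRule branch of the method below.

func canReachRuleStop(atn *ATN, config ATNConfig) bool {
	// A configuration already sitting in a rule stop state qualifies trivially.
	if _, ok := config.GetState().(*RuleStopState); ok {
		return true
	}
	// Otherwise the rule end must be reachable without consuming input:
	// NextTokens signals that via an epsilon entry in the follow set.
	if !config.GetState().GetEpsilonOnlyTransitions() {
		return false
	}
	return atn.NextTokens(config.GetState(), nil).contains(TokenEpsilon)
}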
-// -// @param configs the configuration set to update -// @param lookToEndOfRule when true, p method checks for rule stop states -// reachable by epsilon-only transitions from each configuration in -// {@code configs}. -// -// @return {@code configs} if all configurations in {@code configs} are in a -// rule stop state, otherwise return a Newconfiguration set containing only -// the configurations from {@code configs} which are in a rule stop state -func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { - if PredictionModeallConfigsInRuleStopStates(configs) { - return configs - } - result := NewBaseATNConfigSet(configs.FullContext()) - for _, config := range configs.GetItems() { - if _, ok := config.GetState().(*RuleStopState); ok { - result.Add(config, p.mergeCache) - continue - } - if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { - NextTokens := p.atn.NextTokens(config.GetState(), nil) - if NextTokens.contains(TokenEpsilon) { - endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] - result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) - } - } - } - return result -} - -func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { - // always at least the implicit call to start rule - initialContext := predictionContextFromRuleContext(p.atn, ctx) - configs := NewBaseATNConfigSet(fullCtx) - if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { - fmt.Println("computeStartState from ATN state " + a.String() + - " initialContext=" + initialContext.String()) - } - - for i := 0; i < len(a.GetTransitions()); i++ { - target := a.GetTransitions()[i].getTarget() - c := NewBaseATNConfig6(target, i+1, initialContext) - closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) - p.closure(c, configs, closureBusy, true, fullCtx, false) - } - return configs -} - -// This method transforms the start state computed by -// {@link //computeStartState} to the special start state used by a -// precedence DFA for a particular precedence value. The transformation -// process applies the following changes to the start state's configuration -// set. -// -//
    -//
-// 1. Evaluate the precedence predicates for each configuration using
-//    {@link SemanticContext//evalPrecedence}.
-// 2. Remove all configurations which predict an alternative greater than
-//    1, for which another configuration that predicts alternative 1 is in the
-//    same ATN state with the same prediction context. This transformation is
-//    valid for the following reasons:
-//    - The closure block cannot contain any epsilon transitions which bypass
-//      the body of the closure, so all states reachable via alternative 1 are
-//      part of the precedence alternatives of the transformed left-recursive
-//      rule.
-//    - The "primary" portion of a left recursive rule cannot contain an
-//      epsilon transition, so the only way an alternative other than 1 can exist
-//      in a state that is also reachable via alternative 1 is by nesting calls
-//      to the left-recursive rule, with the outer calls not being at the
-//      preferred precedence level.
-//
-// The prediction context must be considered by this filter to address
-// situations like the following:
-//
-//   grammar TA
-//   prog: statement* EOF
-//   statement: letterA | statement letterA 'b'
-//   letterA: 'a'
-//
-// In the above grammar, the ATN state immediately before the token
-// reference {@code 'a'} in {@code letterA} is reachable from the left edge
-// of both the primary and closure blocks of the left-recursive rule
-// {@code statement}. The prediction context associated with each of these
-// configurations distinguishes between them, and prevents the alternative
-// which stepped out to {@code prog} (and then back in to {@code statement})
-// from being eliminated by the filter.
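// Editorial sketch, not part of the vendored runtime: pass one of the filter
// described above, as a hypothetical in-package helper. It records, per ATN
// state number, the context under which alternative 1 reached that state;
// pass two (the method below) then drops any configuration for an alt > 1
// whose (state, context) pair has a matching entry.

func alt1Contexts(configs ATNConfigSet) map[int]PredictionContext {
	contexts := make(map[int]PredictionContext)
	for _, config := range configs.GetItems() {
		if config.GetAlt() == 1 {
			contexts[config.GetState().GetStateNumber()] = config.GetContext()
		}
	}
	return contexts
}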
-// -// @param configs The configuration set computed by -// {@link //computeStartState} as the start state for the DFA. -// @return The transformed configuration set representing the start state -// for a precedence DFA at a particular precedence level (determined by -// calling {@link Parser//getPrecedence}). -func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { - - statesFromAlt1 := make(map[int]PredictionContext) - configSet := NewBaseATNConfigSet(configs.FullContext()) - - for _, config := range configs.GetItems() { - // handle alt 1 first - if config.GetAlt() != 1 { - continue - } - updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) - if updatedContext == nil { - // the configuration was eliminated - continue - } - statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() - if updatedContext != config.GetSemanticContext() { - configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) - } else { - configSet.Add(config, p.mergeCache) - } - } - for _, config := range configs.GetItems() { - - if config.GetAlt() == 1 { - // already handled - continue - } - // In the future, p elimination step could be updated to also - // filter the prediction context for alternatives predicting alt>1 - // (basically a graph subtraction algorithm). - if !config.getPrecedenceFilterSuppressed() { - context := statesFromAlt1[config.GetState().GetStateNumber()] - if context != nil && context.Equals(config.GetContext()) { - // eliminated - continue - } - } - configSet.Add(config, p.mergeCache) - } - return configSet -} - -func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { - if trans.Matches(ttype, 0, p.atn.maxTokenType) { - return trans.getTarget() - } - - return nil -} - -func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { - - altToPred := make([]SemanticContext, nalts+1) - for _, c := range configs.GetItems() { - if ambigAlts.contains(c.GetAlt()) { - altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) - } - } - nPredAlts := 0 - for i := 1; i <= nalts; i++ { - pred := altToPred[i] - if pred == nil { - altToPred[i] = SemanticContextNone - } else if pred != SemanticContextNone { - nPredAlts++ - } - } - // nonambig alts are nil in altToPred - if nPredAlts == 0 { - altToPred = nil - } - if ParserATNSimulatorDebug { - fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) - } - return altToPred -} - -func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { - pairs := make([]*PredPrediction, 0) - containsPredicate := false - for i := 1; i < len(altToPred); i++ { - pred := altToPred[i] - // unpredicated is indicated by SemanticContextNONE - if ambigAlts != nil && ambigAlts.contains(i) { - pairs = append(pairs, NewPredPrediction(pred, i)) - } - if pred != SemanticContextNone { - containsPredicate = true - } - } - if !containsPredicate { - return nil - } - return pairs -} - -// This method is used to improve the localization of error messages by -// choosing an alternative rather than panicing a -// {@link NoViableAltException} in particular prediction scenarios where the -// {@link //ERROR} state was reached during ATN simulation. -// -//

-// The default implementation of this method uses the following
-// algorithm to identify an ATN configuration which successfully parsed the
-// decision entry rule. Choosing such an alternative ensures that the
-// {@link ParserRuleContext} returned by the calling rule will be complete
-// and valid, and the syntax error will be Reported later at a more
-// localized location.
-//
-//   - If a syntactically valid path or paths reach the end of the decision rule and
-//     they are semantically valid if predicated, return the min associated alt.
-//   - Else, if a semantically invalid but syntactically valid path or paths
-//     exist, return the minimum associated alt.
-//   - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
-//
-// In some scenarios, the algorithm described above could predict an
-// alternative which will result in a {@link FailedPredicateException} in
-// the parser. Specifically, this could occur if the only configuration
-// capable of successfully parsing to the end of the decision rule is
-// blocked by a semantic predicate. By choosing this alternative within
-// {@link //AdaptivePredict} instead of panicking with a
-// {@link NoViableAltException}, the resulting
-// {@link FailedPredicateException} in the parser will identify the specific
-// predicate which is preventing the parser from successfully parsing the
-// decision rule, which helps developers identify and correct logic errors
-// in semantic predicates.
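// Editorial sketch, not part of the vendored runtime: the selection ladder
// just described, written out as a hypothetical helper over the two halves
// produced by splitAccordingToSemanticValidity (defined below).

func pickErrorAlt(p *ParserATNSimulator, semValid, semInvalid ATNConfigSet) int {
	// Prefer an alternative that is both syntactically and semantically viable.
	if alt := p.GetAltThatFinishedDecisionEntryRule(semValid); alt != ATNInvalidAltNumber {
		return alt
	}
	// Otherwise settle for a syntactically viable path blocked only by a
	// predicate, so the parser later raises a FailedPredicateException there.
	if len(semInvalid.GetItems()) > 0 {
		if alt := p.GetAltThatFinishedDecisionEntryRule(semInvalid); alt != ATNInvalidAltNumber {
			return alt
		}
	}
	return ATNInvalidAltNumber
}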
-// -// @param configs The ATN configurations which were valid immediately before -// the {@link //ERROR} state was reached -// @param outerContext The is the \gamma_0 initial parser context from the paper -// or the parser stack at the instant before prediction commences. -// -// @return The value to return from {@link //AdaptivePredict}, or -// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not -// identified and {@link //AdaptivePredict} should Report an error instead. -func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { - cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) - semValidConfigs := cfgs[0] - semInvalidConfigs := cfgs[1] - alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) - if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists - return alt - } - // Is there a syntactically valid path with a failed pred? - if len(semInvalidConfigs.GetItems()) > 0 { - alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) - if alt != ATNInvalidAltNumber { // syntactically viable path exists - return alt - } - } - return ATNInvalidAltNumber -} - -func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { - alts := NewIntervalSet() - - for _, c := range configs.GetItems() { - _, ok := c.GetState().(*RuleStopState) - - if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { - alts.addOne(c.GetAlt()) - } - } - if alts.length() == 0 { - return ATNInvalidAltNumber - } - - return alts.first() -} - -// Walk the list of configurations and split them according to -// those that have preds evaluating to true/false. If no pred, assume -// true pred and include in succeeded set. Returns Pair of sets. -// -// Create a NewSet so as not to alter the incoming parameter. -// -// Assumption: the input stream has been restored to the starting point -// prediction, which is where predicates need to evaluate. - -type ATNConfigSetPair struct { - item0, item1 ATNConfigSet -} - -func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { - succeeded := NewBaseATNConfigSet(configs.FullContext()) - failed := NewBaseATNConfigSet(configs.FullContext()) - - for _, c := range configs.GetItems() { - if c.GetSemanticContext() != SemanticContextNone { - predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) - if predicateEvaluationResult { - succeeded.Add(c, nil) - } else { - failed.Add(c, nil) - } - } else { - succeeded.Add(c, nil) - } - } - return []ATNConfigSet{succeeded, failed} -} - -// Look through a list of predicate/alt pairs, returning alts for the -// -// pairs that win. A {@code NONE} predicate indicates an alt containing an -// unpredicated config which behaves as "always true." If !complete -// then we stop at the first predicate that evaluates to true. This -// includes pairs with nil predicates. 
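// Editorial sketch, not part of the vendored runtime: the short-circuiting
// (!complete) behaviour described above in miniature, as a hypothetical
// helper. The scan stops at the first winning pair; SemanticContextNone
// marks an unpredicated alternative and always wins.

func firstWinningAlt(p *ParserATNSimulator, pairs []*PredPrediction, outerContext ParserRuleContext) int {
	for _, pair := range pairs {
		if pair.pred == SemanticContextNone || pair.pred.evaluate(p.parser, outerContext) {
			return pair.alt
		}
	}
	return ATNInvalidAltNumber
}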
-func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { - predictions := NewBitSet() - for i := 0; i < len(predPredictions); i++ { - pair := predPredictions[i] - if pair.pred == SemanticContextNone { - predictions.add(pair.alt) - if !complete { - break - } - continue - } - - predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) - } - if predicateEvaluationResult { - if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { - fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) - } - predictions.add(pair.alt) - if !complete { - break - } - } - } - return predictions -} - -func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) { - initialDepth := 0 - p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, - fullCtx, initialDepth, treatEOFAsEpsilon) -} - -func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - if ParserATNSimulatorTraceATNSim { - fmt.Println("closure(" + config.String() + ")") - //fmt.Println("configs(" + configs.String() + ")") - if config.GetReachesIntoOuterContext() > 50 { - panic("problem") - } - } - - if _, ok := config.GetState().(*RuleStopState); ok { - // We hit rule end. If we have context info, use it - // run thru all possible stack tops in ctx - if !config.GetContext().isEmpty() { - for i := 0; i < config.GetContext().length(); i++ { - if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { - if fullCtx { - configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) - continue - } else { - // we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) - } - continue - } - returnState := p.atn.states[config.GetContext().getReturnState(i)] - newContext := config.GetContext().GetParent(i) // "pop" return state - - c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) - // While we have context to pop back from, we may have - // gotten that context AFTER having falling off a rule. - // Make sure we track that we are now out of context. 
- c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) - p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) - } - return - } else if fullCtx { - // reached end of start rule - configs.Add(config, p.mergeCache) - return - } else { - // else if we have no context info, just chase follow links (if greedy) - if ParserATNSimulatorDebug { - fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) - } - } - } - p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) -} - -// Do the actual work of walking epsilon edges// -func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { - state := config.GetState() - // optimization - if !state.GetEpsilonOnlyTransitions() { - configs.Add(config, p.mergeCache) - // make sure to not return here, because EOF transitions can act as - // both epsilon transitions and non-epsilon transitions. - } - for i := 0; i < len(state.GetTransitions()); i++ { - if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) { - continue - } - - t := state.GetTransitions()[i] - _, ok := t.(*ActionTransition) - continueCollecting := collectPredicates && !ok - c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) - if ci, ok := c.(*BaseATNConfig); ok && ci != nil { - newDepth := depth - - if _, ok := config.GetState().(*RuleStopState); ok { - // target fell off end of rule mark resulting c as having dipped into outer context - // We can't get here if incoming config was rule stop and we had context - // track how far we dip into outer context. Might - // come in handy and we avoid evaluating context dependent - // preds if p is > 0. - - if p.dfa != nil && p.dfa.getPrecedenceDfa() { - if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { - c.setPrecedenceFilterSuppressed(true) - } - } - - c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) - - _, present := closureBusy.Put(c) - if present { - // avoid infinite recursion for right-recursive rules - continue - } - - configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method - newDepth-- - if ParserATNSimulatorDebug { - fmt.Println("dips into outer ctx: " + c.String()) - } - } else { - - if !t.getIsEpsilon() { - _, present := closureBusy.Put(c) - if present { - // avoid infinite recursion for EOF* and EOF+ - continue - } - } - if _, ok := t.(*RuleTransition); ok { - // latch when newDepth goes negative - once we step out of the entry context we can't return - if newDepth >= 0 { - newDepth++ - } - } - } - p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) - } - } -} - -func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool { - if TurnOffLRLoopEntryBranchOpt { - return false - } - - _p := config.GetState() - - // First check to see if we are in StarLoopEntryState generated during - // left-recursion elimination. For efficiency, also check if - // the context has an empty stack case. 
If so, it would mean - // global FOLLOW so we can't perform optimization - if _p.GetStateType() != ATNStateStarLoopEntry { - return false - } - startLoop, ok := _p.(*StarLoopEntryState) - if !ok { - return false - } - if !startLoop.precedenceRuleDecision || - config.GetContext().isEmpty() || - config.GetContext().hasEmptyPath() { - return false - } - - // Require all return states to return back to the same rule - // that p is in. - numCtxs := config.GetContext().length() - for i := 0; i < numCtxs; i++ { - returnState := p.atn.states[config.GetContext().getReturnState(i)] - if returnState.GetRuleIndex() != _p.GetRuleIndex() { - return false - } - } - x := _p.GetTransitions()[0].getTarget() - decisionStartState := x.(BlockStartState) - blockEndStateNum := decisionStartState.getEndState().stateNumber - blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) - - // Verify that the top of each stack context leads to loop entry/exit - // state through epsilon edges and w/o leaving rule. - - for i := 0; i < numCtxs; i++ { // for each stack context - returnStateNumber := config.GetContext().getReturnState(i) - returnState := p.atn.states[returnStateNumber] - - // all states must have single outgoing epsilon edge - if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() { - return false - } - - // Look for prefix op case like 'not expr', (' type ')' expr - returnStateTarget := returnState.GetTransitions()[0].getTarget() - if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p { - continue - } - - // Look for 'expr op expr' or case where expr's return state is block end - // of (...)* internal block; the block end points to loop back - // which points to p but we don't need to check that - if returnState == blockEndState { - continue - } - - // Look for ternary expr ? expr : expr. 
The return state points at block end, - // which points at loop entry state - if returnStateTarget == blockEndState { - continue - } - - // Look for complex prefix 'between expr and expr' case where 2nd expr's - // return state points at block end state of (...)* internal block - if returnStateTarget.GetStateType() == ATNStateBlockEnd && - len(returnStateTarget.GetTransitions()) == 1 && - returnStateTarget.GetTransitions()[0].getIsEpsilon() && - returnStateTarget.GetTransitions()[0].getTarget() == _p { - continue - } - - // anything else ain't conforming - return false - } - - return true -} - -func (p *ParserATNSimulator) getRuleName(index int) string { - if p.parser != nil && index >= 0 { - return p.parser.GetRuleNames()[index] - } - var sb strings.Builder - sb.Grow(32) - - sb.WriteString("') - return sb.String() -} - -func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { - - switch t.getSerializationType() { - case TransitionRULE: - return p.ruleTransition(config, t.(*RuleTransition)) - case TransitionPRECEDENCE: - return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionPREDICATE: - return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) - case TransitionACTION: - return p.actionTransition(config, t.(*ActionTransition)) - case TransitionEPSILON: - return NewBaseATNConfig4(config, t.getTarget()) - case TransitionATOM, TransitionRANGE, TransitionSET: - // EOF transitions act like epsilon transitions after the first EOF - // transition is traversed - if treatEOFAsEpsilon { - if t.Matches(TokenEOF, 0, 1) { - return NewBaseATNConfig4(config, t.getTarget()) - } - } - return nil - default: - return nil - } -} - -func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) - } - return NewBaseATNConfig4(config, t.getTarget()) -} - -func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, - pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + - strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && inContext { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. 
- currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { - - if ParserATNSimulatorDebug { - fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + - ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) - if p.parser != nil { - fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) - } - } - var c *BaseATNConfig - if collectPredicates && (!pt.isCtxDependent || inContext) { - if fullCtx { - // In full context mode, we can evaluate predicates on-the-fly - // during closure, which dramatically reduces the size of - // the config sets. It also obviates the need to test predicates - // later during conflict resolution. - currentPosition := p.input.Index() - p.input.Seek(p.startIndex) - predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) - p.input.Seek(currentPosition) - if predSucceeds { - c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context - } - } else { - newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) - c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) - } - } else { - c = NewBaseATNConfig4(config, pt.getTarget()) - } - if ParserATNSimulatorDebug { - fmt.Println("config from pred transition=" + c.String()) - } - return c -} - -func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { - if ParserATNSimulatorDebug { - fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) - } - returnState := t.followState - newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) - return NewBaseATNConfig1(config, t.getTarget(), newContext) -} - -func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModeGetAlts(altsets) -} - -// Sam pointed out a problem with the previous definition, v3, of -// ambiguous states. If we have another state associated with conflicting -// alternatives, we should keep going. For example, the following grammar -// -// s : (ID | ID ID?) '' -// -// When the ATN simulation reaches the state before '', it has a DFA -// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally -// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node -// because alternative to has another way to continue, via [6|2|[]]. -// The key is that we have a single state that has config's only associated -// with a single alternative, 2, and crucially the state transitions -// among the configurations are all non-epsilon transitions. That means -// we don't consider any conflicts that include alternative 2. So, we -// ignore the conflict between alts 1 and 2. 
We ignore a set of -// conflicting alts when there is an intersection with an alternative -// associated with a single alt state in the state&rarrconfig-list map. -// -// It's also the case that we might have two conflicting configurations but -// also a 3rd nonconflicting configuration for a different alternative: -// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: -// -// a : A | A | A B -// -// After Matching input A, we reach the stop state for rule A, state 1. -// State 8 is the state right before B. Clearly alternatives 1 and 2 -// conflict and no amount of further lookahead will separate the two. -// However, alternative 3 will be able to continue and so we do not -// stop working on p state. In the previous example, we're concerned -// with states associated with the conflicting alternatives. Here alt -// 3 is not associated with the conflicting configs, but since we can continue -// looking for input reasonably, I don't declare the state done. We -// ignore a set of conflicting alts when we have an alternative -// that we still need to pursue. -// - -func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { - var conflictingAlts *BitSet - if configs.GetUniqueAlt() != ATNInvalidAltNumber { - conflictingAlts = NewBitSet() - conflictingAlts.add(configs.GetUniqueAlt()) - } else { - conflictingAlts = configs.GetConflictingAlts() - } - return conflictingAlts -} - -func (p *ParserATNSimulator) GetTokenName(t int) string { - if t == TokenEOF { - return "EOF" - } - - if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) { - return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" - } - - if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) { - return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">" - } - - return strconv.Itoa(t) -} - -func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { - return p.GetTokenName(input.LA(1)) -} - -// Used for debugging in AdaptivePredict around execATN but I cut -// -// it out for clarity now that alg. works well. We can leave p -// "dead" code for a bit. -func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { - - panic("Not implemented") - - // fmt.Println("dead end configs: ") - // var decs = nvae.deadEndConfigs - // - // for i:=0; i0) { - // var t = c.state.GetTransitions()[0] - // if t2, ok := t.(*AtomTransition); ok { - // trans = "Atom "+ p.GetTokenName(t2.label) - // } else if t3, ok := t.(SetTransition); ok { - // _, ok := t.(*NotSetTransition) - // - // var s string - // if (ok){ - // s = "~" - // } - // - // trans = s + "Set " + t3.set - // } - // } - // fmt.Errorf(c.String(p.parser, true) + ":" + trans) - // } -} - -func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { - return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) -} - -func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { - alt := ATNInvalidAltNumber - for _, c := range configs.GetItems() { - if alt == ATNInvalidAltNumber { - alt = c.GetAlt() // found first alt - } else if c.GetAlt() != alt { - return ATNInvalidAltNumber - } - } - return alt -} - -// Add an edge to the DFA, if possible. This method calls -// {@link //addDFAState} to ensure the {@code to} state is present in the -// DFA. 
-// If {@code from} is {@code nil}, or if {@code t} is outside the
-// range of edges that can be represented in the DFA tables, this method
-// returns without adding the edge to the DFA.
-//
-// If {@code to} is {@code nil}, this method returns {@code nil}.
-// Otherwise, this method returns the {@link DFAState} returned by calling
-// {@link //addDFAState} for the {@code to} state.
-// -// @param dfa The DFA -// @param from The source state for the edge -// @param t The input symbol -// @param to The target state for the edge -// -// @return If {@code to} is {@code nil}, p method returns {@code nil} -// otherwise p method returns the result of calling {@link //addDFAState} -// on {@code to} -func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { - if ParserATNSimulatorDebug { - fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) - } - if to == nil { - return nil - } - p.atn.stateMu.Lock() - to = p.addDFAState(dfa, to) // used existing if possible not incoming - p.atn.stateMu.Unlock() - if from == nil || t < -1 || t > p.atn.maxTokenType { - return to - } - p.atn.edgeMu.Lock() - if from.getEdges() == nil { - from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1)) - } - from.setIthEdge(t+1, to) // connect - p.atn.edgeMu.Unlock() - - if ParserATNSimulatorDebug { - var names []string - if p.parser != nil { - names = p.parser.GetLiteralNames() - } - - fmt.Println("DFA=\n" + dfa.String(names, nil)) - } - return to -} - -// Add state {@code D} to the DFA if it is not already present, and return -// the actual instance stored in the DFA. If a state equivalent to {@code D} -// is already in the DFA, the existing state is returned. Otherwise p -// method returns {@code D} after adding it to the DFA. -// -//

-// If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and
-// does not change the DFA.
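// Editorial sketch, not part of the vendored runtime: the DFA-as-cache
// protocol that addDFAEdge and addDFAState serve, from execATN's point of
// view, as a hypothetical helper. Reuse a previously recorded edge when one
// exists; otherwise run the ATN closure once and let computeTargetState
// record the new state and edge for future predictions. Both methods called
// here are the real ones defined earlier in this file.

func nextDFAState(p *ParserATNSimulator, dfa *DFA, previous *DFAState, t int) *DFAState {
	// Fast path: an edge cached by an earlier prediction.
	if target := p.getExistingTargetState(previous, t); target != nil {
		return target
	}
	// Slow path: compute the target from the ATN and cache it in the DFA.
	return p.computeTargetState(dfa, previous, t)
}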
-// -// @param dfa The dfa -// @param D The DFA state to add -// @return The state stored in the DFA. This will be either the existing -// state if {@code D} is already in the DFA, or {@code D} itself if the -// state was not already present. -func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { - if d == ATNSimulatorError { - return d - } - existing, present := dfa.states.Get(d) - if present { - if ParserATNSimulatorTraceATNSim { - fmt.Print("addDFAState " + d.String() + " exists") - } - return existing - } - - // The state was not present, so update it with configs - // - d.stateNumber = dfa.states.Len() - if !d.configs.ReadOnly() { - d.configs.OptimizeConfigs(p.BaseATNSimulator) - d.configs.SetReadOnly(true) - } - dfa.states.Put(d) - if ParserATNSimulatorTraceATNSim { - fmt.Println("addDFAState new " + d.String()) - } - - return d -} - -func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) - } -} - -func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) - } -} - -// If context sensitive parsing, we know it's ambiguity not conflict// -func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, - exact bool, ambigAlts *BitSet, configs ATNConfigSet) { - if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { - interval := NewInterval(startIndex, stopIndex+1) - fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + - ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) - } - if p.parser != nil { - p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) - } -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + ParserATNSimulatorDebug = false + ParserATNSimulatorTraceATNSim = false + ParserATNSimulatorDFADebug = false + ParserATNSimulatorRetryDebug = false + TurnOffLRLoopEntryBranchOpt = false +) + +type ParserATNSimulator struct { + *BaseATNSimulator + + parser Parser + predictionMode int + input TokenStream + startIndex int + dfa *DFA + mergeCache *DoubleDict + outerContext ParserRuleContext +} + +func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { + + p := new(ParserATNSimulator) + + p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + + p.parser = parser + p.decisionToDFA = decisionToDFA + // SLL, LL, or LL + exact ambig detection?// + p.predictionMode = PredictionModeLL + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + p.input = nil + p.startIndex = 0 + p.outerContext = nil + p.dfa = nil + // Each prediction operation uses a cache for merge of prediction contexts. + // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + // isn't Synchronized but we're ok since two threads shouldn't reuse same + // parser/atnsim object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid + // the merge if we ever see a and b again. Note that (b,a)&rarrc should + // also be examined during cache lookup. + // + p.mergeCache = nil + + return p +} + +func (p *ParserATNSimulator) GetPredictionMode() int { + return p.predictionMode +} + +func (p *ParserATNSimulator) SetPredictionMode(v int) { + p.predictionMode = v +} + +func (p *ParserATNSimulator) reset() { +} + +func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { + fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + + strconv.Itoa(input.LT(1).GetColumn())) + } + + p.input = input + p.startIndex = input.Index() + p.outerContext = outerContext + + dfa := p.decisionToDFA[decision] + p.dfa = dfa + m := input.Mark() + index := input.Index() + + defer func() { + p.dfa = nil + p.mergeCache = nil // wack cache after each prediction + input.Seek(index) + input.Release(m) + }() + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + var s0 *DFAState + p.atn.stateMu.RLock() + if dfa.getPrecedenceDfa() { + p.atn.edgeMu.RLock() + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. 
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) + p.atn.edgeMu.RUnlock() + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.getS0() + } + p.atn.stateMu.RUnlock() + + if s0 == nil { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + if ParserATNSimulatorDebug { + fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) + } + fullCtx := false + s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) + + p.atn.stateMu.Lock() + if dfa.getPrecedenceDfa() { + // If p is a precedence DFA, we use applyPrecedenceFilter + // to convert the computed start state to a precedence start + // state. We then use DFA.setPrecedenceStartState to set the + // appropriate start state for the precedence level rather + // than simply setting DFA.s0. + // + dfa.s0.configs = s0Closure + s0Closure = p.applyPrecedenceFilter(s0Closure) + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + p.atn.edgeMu.Lock() + dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) + p.atn.edgeMu.Unlock() + } else { + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + dfa.setS0(s0) + } + p.atn.stateMu.Unlock() + } + + alt := p.execATN(dfa, s0, input, index, outerContext) + if ParserATNSimulatorDebug { + fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) + } + return alt + +} + +// Performs ATN simulation to compute a predicted alternative based +// upon the remaining input, but also updates the DFA cache to avoid +// having to traverse the ATN again for the same input sequence. + +// There are some key conditions we're looking for after computing a new +// set of ATN configs (proposed DFA state): +// if the set is empty, there is no viable alternative for current symbol +// does the state uniquely predict an alternative? +// does the state have a conflict that would prevent us from +// putting it on the work list? + +// We also have some key operations to do: +// add an edge from previous DFA state to potentially NewDFA state, D, +// upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) 
+// collecting predicates and adding semantic context to DFA accept states +// adding rule context to context-sensitive DFA accept states +// consuming an input symbol +// Reporting a conflict +// Reporting an ambiguity +// Reporting a context sensitivity +// Reporting insufficient predicates + +// cover these cases: +// +// dead end +// single alt +// single alt + preds +// conflict +// conflict + preds +func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { + + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { + fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + + ", DFA state " + s0.String() + + ", LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) + } + + previousD := s0 + + if ParserATNSimulatorDebug { + fmt.Println("s0 = " + s0.String()) + } + t := input.LA(1) + for { // for more work + D := p.getExistingTargetState(previousD, t) + if D == nil { + D = p.computeTargetState(dfa, previousD, t) + } + if D == ATNSimulatorError { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + if alt != ATNInvalidAltNumber { + return alt + } + + panic(e) + } + if D.requiresFullContext && p.predictionMode != PredictionModeSLL { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + conflictingAlts := D.configs.GetConflictingAlts() + if D.predicates != nil { + if ParserATNSimulatorDebug { + fmt.Println("DFA state has preds in DFA sim LL failover") + } + conflictIndex := input.Index() + if conflictIndex != startIndex { + input.Seek(startIndex) + } + conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) + if conflictingAlts.length() == 1 { + if ParserATNSimulatorDebug { + fmt.Println("Full LL avoided") + } + return conflictingAlts.minValue() + } + if conflictIndex != startIndex { + // restore the index so Reporting the fallback to full + // context occurs with the index at the correct spot + input.Seek(conflictIndex) + } + } + if ParserATNSimulatorDFADebug { + fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) + } + fullCtx := true + s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) + p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) + alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) + return alt + } + if D.isAcceptState { + if D.predicates == nil { + return D.prediction + } + stopIndex := input.Index() + input.Seek(startIndex) + alts := p.evalSemanticContext(D.predicates, outerContext, true) + + switch alts.length() { + case 0: + panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) + case 1: + return alts.minValue() + default: + // Report ambiguity after predicate evaluation to make sure the correct set of 
ambig alts is Reported. + p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) + return alts.minValue() + } + } + previousD = D + + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// p method returns {@code nil}. +// +// @param previousD The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for p edge is not +// already cached + +func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { + if t+1 < 0 { + return nil + } + + p.atn.edgeMu.RLock() + defer p.atn.edgeMu.RUnlock() + edges := previousD.getEdges() + if edges == nil || t+1 >= len(edges) { + return nil + } + return previousD.getIthEdge(t + 1) +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param dfa The DFA +// @param previousD The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, p method +// returns {@link //ERROR}. + +func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { + reach := p.computeReachSet(previousD.configs, t, false) + + if reach == nil { + p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) + return ATNSimulatorError + } + // create Newtarget state we'll add to DFA after it's complete + D := NewDFAState(-1, reach) + + predictedAlt := p.getUniqueAlt(reach) + + if ParserATNSimulatorDebug { + altSubSets := PredictionModegetConflictingAltSubsets(reach) + fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + + ", previous=" + previousD.configs.String() + + ", configs=" + reach.String() + + ", predict=" + strconv.Itoa(predictedAlt) + + ", allSubsetsConflict=" + + fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + + ", conflictingAlts=" + p.getConflictingAlts(reach).String()) + } + if predictedAlt != ATNInvalidAltNumber { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true + D.configs.SetUniqueAlt(predictedAlt) + D.setPrediction(predictedAlt) + } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) + D.requiresFullContext = true + // in SLL-only mode, we will stop at p state and return the minimum alt + D.isAcceptState = true + D.setPrediction(D.configs.GetConflictingAlts().minValue()) + } + if D.isAcceptState && D.configs.HasSemanticContext() { + p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) + if D.predicates != nil { + D.setPrediction(ATNInvalidAltNumber) + } + } + // all adds to dfa are done after we've created full D state + D = p.addDFAEdge(dfa, previousD, t, D) + return D +} + +func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. 
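+ // Even a uniquely predicted alternative may still be guarded by a
+ // semantic predicate; if predicates were collected only for conflicting
+ // alternatives, such a guard would never be evaluated and input that the
+ // predicate is meant to reject would be accepted silently.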
+ nalts := len(decisionState.GetTransitions()) + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) + altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) + if altToPred != nil { + dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) + dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.setPrediction(altsToCollectPredsFrom.minValue()) + } +} + +// comes back with reach.uniqueAlt set to a valid alt +func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { + + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { + fmt.Println("execATNWithFullContext " + s0.String()) + } + + fullCtx := true + foundExactAmbig := false + var reach ATNConfigSet + previous := s0 + input.Seek(startIndex) + t := input.LA(1) + predictedAlt := -1 + + for { // for more work + reach = p.computeReachSet(previous, t, fullCtx) + if reach == nil { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + e := p.noViableAlt(input, outerContext, previous, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + if alt != ATNInvalidAltNumber { + return alt + } + + panic(e) + } + altSubSets := PredictionModegetConflictingAltSubsets(reach) + if ParserATNSimulatorDebug { + fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) + } + reach.SetUniqueAlt(p.getUniqueAlt(reach)) + // unique prediction? + if reach.GetUniqueAlt() != ATNInvalidAltNumber { + predictedAlt = reach.GetUniqueAlt() + break + } + if p.predictionMode != PredictionModeLLExactAmbigDetection { + predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) + if predictedAlt != ATNInvalidAltNumber { + break + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { + foundExactAmbig = true + predictedAlt = PredictionModegetSingleViableAlt(altSubSets) + break + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + previous = reach + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. 
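+ // A unique alternative at this point means SLL conflicted but full LL
+ // resolved the decision, which is precisely a context-sensitive
+ // decision, so it is Reported to the error listeners for diagnostics.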
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber { + p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) + return predictedAlt + } + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. + + // + // In non-exact ambiguity detection mode, we might actually be able to + // detect an exact ambiguity, but I'm not going to spend the cycles + // needed to check. We only emit ambiguity warnings in exact ambiguity + // mode. + // + // For example, we might know that we have conflicting configurations. + // But, that does not mean that there is no way forward without a + // conflict. It's possible to have nonconflicting alt subsets as in: + + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + + // from + // + // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + // + // In p case, (17,1,[5 $]) indicates there is some next sequence that + // would resolve p without conflict to alternative 1. Any other viable + // next sequence, however, is associated with a conflict. We stop + // looking for input because no amount of further lookahead will alter + // the fact that we should predict alternative 1. We just can't say for + // sure that there is an ambiguity without looking further. + + p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) + + return predictedAlt +} + +func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { + if p.mergeCache == nil { + p.mergeCache = NewDoubleDict() + } + intermediate := NewBaseATNConfigSet(fullCtx) + + // Configurations already in a rule stop state indicate reaching the end + // of the decision rule (local context) or end of the start rule (full + // context). Once reached, these configurations are never updated by a + // closure operation, so they are handled separately for the performance + // advantage of having a smaller intermediate set when calling closure. + // + // For full-context reach operations, separate handling is required to + // ensure that the alternative Matching the longest overall sequence is + // chosen when multiple such configurations can Match the input. + + var skippedStopStates []*BaseATNConfig + + // First figure out where we can reach on input t + for _, c := range closure.GetItems() { + if ParserATNSimulatorDebug { + fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) + } + + if _, ok := c.GetState().(*RuleStopState); ok { + if fullCtx || t == TokenEOF { + skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig)) + if ParserATNSimulatorDebug { + fmt.Println("added " + c.String() + " to SkippedStopStates") + } + } + continue + } + + for _, trans := range c.GetState().GetTransitions() { + target := p.getReachableTarget(trans, t) + if target != nil { + cfg := NewBaseATNConfig4(c, target) + intermediate.Add(cfg, p.mergeCache) + if ParserATNSimulatorDebug { + fmt.Println("added " + cfg.String() + " to intermediate") + } + } + } + } + + // Now figure out where the reach operation can take us... + var reach ATNConfigSet + + // This block optimizes the reach operation for intermediate sets which + // trivially indicate a termination state for the overall + // AdaptivePredict operation. 
+ // + // The conditions assume that intermediate + // contains all configurations relevant to the reach set, but p + // condition is not true when one or more configurations have been + // withheld in SkippedStopStates, or when the current symbol is EOF. + // + if skippedStopStates == nil && t != TokenEOF { + if len(intermediate.configs) == 1 { + // Don't pursue the closure if there is just one state. + // It can only have one alternative just add to result + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } + } + // If the reach set could not be trivially determined, perform a closure + // operation on the intermediate set to compute its initial value. + // + if reach == nil { + reach = NewBaseATNConfigSet(fullCtx) + closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](aConfEqInst) + treatEOFAsEpsilon := t == TokenEOF + amount := len(intermediate.configs) + for k := 0; k < amount; k++ { + p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) + } + } + if t == TokenEOF { + // After consuming EOF no additional input is possible, so we are + // only interested in configurations which reached the end of the + // decision rule (local context) or end of the start rule (full + // context). Update reach to contain only these configurations. This + // handles both explicit EOF transitions in the grammar and implicit + // EOF transitions following the end of the decision or start rule. + // + // When reach==intermediate, no closure operation was performed. In + // p case, removeAllConfigsNotInRuleStopState needs to check for + // reachable rule stop states as well as configurations already in + // a rule stop state. + // + // This is handled before the configurations in SkippedStopStates, + // because any configurations potentially added from that list are + // already guaranteed to meet p condition whether or not it's + // required. + // + reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) + } + // If SkippedStopStates!=nil, then it contains at least one + // configuration. For full-context reach operations, these + // configurations reached the end of the start rule, in which case we + // only add them back to reach if no configuration during the current + // closure operation reached such a state. This ensures AdaptivePredict + // chooses an alternative Matching the longest overall sequence when + // multiple alternatives are viable. + // + if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { + for l := 0; l < len(skippedStopStates); l++ { + reach.Add(skippedStopStates[l], p.mergeCache) + } + } + + if ParserATNSimulatorTraceATNSim { + fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) + } + + if len(reach.GetItems()) == 0 { + return nil + } + + return reach +} + +// Return a configuration set containing only the configurations from +// {@code configs} which are in a {@link RuleStopState}. If all +// configurations in {@code configs} are already in a rule stop state, p +// method simply returns {@code configs}. +// +//

+// When {@code lookToEndOfRule} is true, this method uses
+// {@link ATN//NextTokens} for each configuration in {@code configs} which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.
+// +// @param configs the configuration set to update +// @param lookToEndOfRule when true, p method checks for rule stop states +// reachable by epsilon-only transitions from each configuration in +// {@code configs}. +// +// @return {@code configs} if all configurations in {@code configs} are in a +// rule stop state, otherwise return a Newconfiguration set containing only +// the configurations from {@code configs} which are in a rule stop state +func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { + if PredictionModeallConfigsInRuleStopStates(configs) { + return configs + } + result := NewBaseATNConfigSet(configs.FullContext()) + for _, config := range configs.GetItems() { + if _, ok := config.GetState().(*RuleStopState); ok { + result.Add(config, p.mergeCache) + continue + } + if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { + NextTokens := p.atn.NextTokens(config.GetState(), nil) + if NextTokens.contains(TokenEpsilon) { + endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] + result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) + } + } + } + return result +} + +func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { + // always at least the implicit call to start rule + initialContext := predictionContextFromRuleContext(p.atn, ctx) + configs := NewBaseATNConfigSet(fullCtx) + if ParserATNSimulatorDebug || ParserATNSimulatorTraceATNSim { + fmt.Println("computeStartState from ATN state " + a.String() + + " initialContext=" + initialContext.String()) + } + + for i := 0; i < len(a.GetTransitions()); i++ { + target := a.GetTransitions()[i].getTarget() + c := NewBaseATNConfig6(target, i+1, initialContext) + closureBusy := NewJStore[ATNConfig, Comparator[ATNConfig]](atnConfCompInst) + p.closure(c, configs, closureBusy, true, fullCtx, false) + } + return configs +} + +// This method transforms the start state computed by +// {@link //computeStartState} to the special start state used by a +// precedence DFA for a particular precedence value. The transformation +// process applies the following changes to the start state's configuration +// set. +// +//
+// 1. Evaluate the precedence predicates for each configuration using
+//    {@link SemanticContext//evalPrecedence}.
+// 2. Remove all configurations which predict an alternative greater than
+//    1, for which another configuration that predicts alternative 1 is in the
+//    same ATN state with the same prediction context. This transformation is
+//    valid for the following reasons:
+//
+//   - The closure block cannot contain any epsilon transitions which bypass
+//     the body of the closure, so all states reachable via alternative 1 are
+//     part of the precedence alternatives of the transformed left-recursive
+//     rule.
+//   - The "primary" portion of a left recursive rule cannot contain an
+//     epsilon transition, so the only way an alternative other than 1 can exist
+//     in a state that is also reachable via alternative 1 is by nesting calls
+//     to the left-recursive rule, with the outer calls not being at the
+//     preferred precedence level.
+//
+// The prediction context must be considered by this filter to address
+// situations like the following:
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+// In the above grammar, the ATN state immediately before the token
+// reference {@code 'a'} in {@code letterA} is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// {@code statement}. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to {@code prog} (and then back in to {@code statement})
+// from being eliminated by the filter.
+// +// @param configs The configuration set computed by +// {@link //computeStartState} as the start state for the DFA. +// @return The transformed configuration set representing the start state +// for a precedence DFA at a particular precedence level (determined by +// calling {@link Parser//getPrecedence}). +func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { + + statesFromAlt1 := make(map[int]PredictionContext) + configSet := NewBaseATNConfigSet(configs.FullContext()) + + for _, config := range configs.GetItems() { + // handle alt 1 first + if config.GetAlt() != 1 { + continue + } + updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) + if updatedContext == nil { + // the configuration was eliminated + continue + } + statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() + if updatedContext != config.GetSemanticContext() { + configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) + } else { + configSet.Add(config, p.mergeCache) + } + } + for _, config := range configs.GetItems() { + + if config.GetAlt() == 1 { + // already handled + continue + } + // In the future, p elimination step could be updated to also + // filter the prediction context for alternatives predicting alt>1 + // (basically a graph subtraction algorithm). + if !config.getPrecedenceFilterSuppressed() { + context := statesFromAlt1[config.GetState().GetStateNumber()] + if context != nil && context.Equals(config.GetContext()) { + // eliminated + continue + } + } + configSet.Add(config, p.mergeCache) + } + return configSet +} + +func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { + if trans.Matches(ttype, 0, p.atn.maxTokenType) { + return trans.getTarget() + } + + return nil +} + +func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { + + altToPred := make([]SemanticContext, nalts+1) + for _, c := range configs.GetItems() { + if ambigAlts.contains(c.GetAlt()) { + altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) + } + } + nPredAlts := 0 + for i := 1; i <= nalts; i++ { + pred := altToPred[i] + if pred == nil { + altToPred[i] = SemanticContextNone + } else if pred != SemanticContextNone { + nPredAlts++ + } + } + // nonambig alts are nil in altToPred + if nPredAlts == 0 { + altToPred = nil + } + if ParserATNSimulatorDebug { + fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) + } + return altToPred +} + +func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { + pairs := make([]*PredPrediction, 0) + containsPredicate := false + for i := 1; i < len(altToPred); i++ { + pred := altToPred[i] + // unpredicated is indicated by SemanticContextNONE + if ambigAlts != nil && ambigAlts.contains(i) { + pairs = append(pairs, NewPredPrediction(pred, i)) + } + if pred != SemanticContextNone { + containsPredicate = true + } + } + if !containsPredicate { + return nil + } + return pairs +} + +// This method is used to improve the localization of error messages by +// choosing an alternative rather than panicing a +// {@link NoViableAltException} in particular prediction scenarios where the +// {@link //ERROR} state was reached during ATN simulation. +// +//

+// The default implementation of this method uses the following
+// algorithm to identify an ATN configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// {@link ParserRuleContext} returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.
+//
+//   - If a syntactically valid path or paths reach the end of the decision
+//     rule and they are semantically valid if predicated, return the min
+//     associated alt.
+//   - Else, if a semantically invalid but syntactically valid path or paths
+//     exist, return the minimum associated alt.
+//   - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
+//

+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a {@link FailedPredicateException} in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// {@link //AdaptivePredict} instead of panicking with a
+// {@link NoViableAltException}, the resulting
+// {@link FailedPredicateException} in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+//

+// +// @param configs The ATN configurations which were valid immediately before +// the {@link //ERROR} state was reached +// @param outerContext The is the \gamma_0 initial parser context from the paper +// or the parser stack at the instant before prediction commences. +// +// @return The value to return from {@link //AdaptivePredict}, or +// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not +// identified and {@link //AdaptivePredict} should Report an error instead. +func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { + cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) + semValidConfigs := cfgs[0] + semInvalidConfigs := cfgs[1] + alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) + if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists + return alt + } + // Is there a syntactically valid path with a failed pred? + if len(semInvalidConfigs.GetItems()) > 0 { + alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) + if alt != ATNInvalidAltNumber { // syntactically viable path exists + return alt + } + } + return ATNInvalidAltNumber +} + +func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { + alts := NewIntervalSet() + + for _, c := range configs.GetItems() { + _, ok := c.GetState().(*RuleStopState) + + if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { + alts.addOne(c.GetAlt()) + } + } + if alts.length() == 0 { + return ATNInvalidAltNumber + } + + return alts.first() +} + +// Walk the list of configurations and split them according to +// those that have preds evaluating to true/false. If no pred, assume +// true pred and include in succeeded set. Returns Pair of sets. +// +// Create a NewSet so as not to alter the incoming parameter. +// +// Assumption: the input stream has been restored to the starting point +// prediction, which is where predicates need to evaluate. + +type ATNConfigSetPair struct { + item0, item1 ATNConfigSet +} + +func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { + succeeded := NewBaseATNConfigSet(configs.FullContext()) + failed := NewBaseATNConfigSet(configs.FullContext()) + + for _, c := range configs.GetItems() { + if c.GetSemanticContext() != SemanticContextNone { + predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) + if predicateEvaluationResult { + succeeded.Add(c, nil) + } else { + failed.Add(c, nil) + } + } else { + succeeded.Add(c, nil) + } + } + return []ATNConfigSet{succeeded, failed} +} + +// Look through a list of predicate/alt pairs, returning alts for the +// +// pairs that win. A {@code NONE} predicate indicates an alt containing an +// unpredicated config which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. 
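+//
+// For example (illustrative values), given the pairs
+//
+//	[ ({a}?, alt 1), (NONE, alt 2) ]
+//
+// where {a}? evaluates to false, the returned BitSet is {2}: the NONE
+// predicate on alt 2 behaves as always true. With {@code complete} false,
+// the walk would also stop at alt 2, since NONE counts as a match.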
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { + predictions := NewBitSet() + for i := 0; i < len(predPredictions); i++ { + pair := predPredictions[i] + if pair.pred == SemanticContextNone { + predictions.add(pair.alt) + if !complete { + break + } + continue + } + + predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) + if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) + } + if predicateEvaluationResult { + if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) + } + predictions.add(pair.alt) + if !complete { + break + } + } + } + return predictions +} + +func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx, treatEOFAsEpsilon bool) { + initialDepth := 0 + p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEOFAsEpsilon) +} + +func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if ParserATNSimulatorTraceATNSim { + fmt.Println("closure(" + config.String() + ")") + //fmt.Println("configs(" + configs.String() + ")") + if config.GetReachesIntoOuterContext() > 50 { + panic("problem") + } + } + + if _, ok := config.GetState().(*RuleStopState); ok { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if !config.GetContext().isEmpty() { + for i := 0; i < config.GetContext().length(); i++ { + if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if ParserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[config.GetContext().getReturnState(i)] + newContext := config.GetContext().GetParent(i) // "pop" return state + + c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. 
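+ // Copy the dip counter onto the popped configuration so the number of
+ // times this path escaped the decision's entry rule is preserved while
+ // the return state is simulated.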
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) + p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) + } + return + } else if fullCtx { + // reached end of start rule + configs.Add(config, p.mergeCache) + return + } else { + // else if we have no context info, just chase follow links (if greedy) + if ParserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + } + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) +} + +// Do the actual work of walking epsilon edges// +func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *JStore[ATNConfig, Comparator[ATNConfig]], collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + state := config.GetState() + // optimization + if !state.GetEpsilonOnlyTransitions() { + configs.Add(config, p.mergeCache) + // make sure to not return here, because EOF transitions can act as + // both epsilon transitions and non-epsilon transitions. + } + for i := 0; i < len(state.GetTransitions()); i++ { + if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) { + continue + } + + t := state.GetTransitions()[i] + _, ok := t.(*ActionTransition) + continueCollecting := collectPredicates && !ok + c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) + if ci, ok := c.(*BaseATNConfig); ok && ci != nil { + newDepth := depth + + if _, ok := config.GetState().(*RuleStopState); ok { + // target fell off end of rule mark resulting c as having dipped into outer context + // We can't get here if incoming config was rule stop and we had context + // track how far we dip into outer context. Might + // come in handy and we avoid evaluating context dependent + // preds if p is > 0. + + if p.dfa != nil && p.dfa.getPrecedenceDfa() { + if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { + c.setPrecedenceFilterSuppressed(true) + } + } + + c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) + + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for right-recursive rules + continue + } + + configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of p method + newDepth-- + if ParserATNSimulatorDebug { + fmt.Println("dips into outer ctx: " + c.String()) + } + } else { + + if !t.getIsEpsilon() { + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for EOF* and EOF+ + continue + } + } + if _, ok := t.(*RuleTransition); ok { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if newDepth >= 0 { + newDepth++ + } + } + } + p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) + } + } +} + +func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool { + if TurnOffLRLoopEntryBranchOpt { + return false + } + + _p := config.GetState() + + // First check to see if we are in StarLoopEntryState generated during + // left-recursion elimination. For efficiency, also check if + // the context has an empty stack case. 
If so, it would mean + // global FOLLOW so we can't perform optimization + if _p.GetStateType() != ATNStateStarLoopEntry { + return false + } + startLoop, ok := _p.(*StarLoopEntryState) + if !ok { + return false + } + if !startLoop.precedenceRuleDecision || + config.GetContext().isEmpty() || + config.GetContext().hasEmptyPath() { + return false + } + + // Require all return states to return back to the same rule + // that p is in. + numCtxs := config.GetContext().length() + for i := 0; i < numCtxs; i++ { + returnState := p.atn.states[config.GetContext().getReturnState(i)] + if returnState.GetRuleIndex() != _p.GetRuleIndex() { + return false + } + } + x := _p.GetTransitions()[0].getTarget() + decisionStartState := x.(BlockStartState) + blockEndStateNum := decisionStartState.getEndState().stateNumber + blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) + + // Verify that the top of each stack context leads to loop entry/exit + // state through epsilon edges and w/o leaving rule. + + for i := 0; i < numCtxs; i++ { // for each stack context + returnStateNumber := config.GetContext().getReturnState(i) + returnState := p.atn.states[returnStateNumber] + + // all states must have single outgoing epsilon edge + if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() { + return false + } + + // Look for prefix op case like 'not expr', (' type ')' expr + returnStateTarget := returnState.GetTransitions()[0].getTarget() + if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p { + continue + } + + // Look for 'expr op expr' or case where expr's return state is block end + // of (...)* internal block; the block end points to loop back + // which points to p but we don't need to check that + if returnState == blockEndState { + continue + } + + // Look for ternary expr ? expr : expr. 
The return state points at block end, + // which points at loop entry state + if returnStateTarget == blockEndState { + continue + } + + // Look for complex prefix 'between expr and expr' case where 2nd expr's + // return state points at block end state of (...)* internal block + if returnStateTarget.GetStateType() == ATNStateBlockEnd && + len(returnStateTarget.GetTransitions()) == 1 && + returnStateTarget.GetTransitions()[0].getIsEpsilon() && + returnStateTarget.GetTransitions()[0].getTarget() == _p { + continue + } + + // anything else ain't conforming + return false + } + + return true +} + +func (p *ParserATNSimulator) getRuleName(index int) string { + if p.parser != nil && index >= 0 { + return p.parser.GetRuleNames()[index] + } + var sb strings.Builder + sb.Grow(32) + + sb.WriteString("') + return sb.String() +} + +func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { + + switch t.getSerializationType() { + case TransitionRULE: + return p.ruleTransition(config, t.(*RuleTransition)) + case TransitionPRECEDENCE: + return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionPREDICATE: + return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionACTION: + return p.actionTransition(config, t.(*ActionTransition)) + case TransitionEPSILON: + return NewBaseATNConfig4(config, t.getTarget()) + case TransitionATOM, TransitionRANGE, TransitionSET: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEOFAsEpsilon { + if t.Matches(TokenEOF, 0, 1) { + return NewBaseATNConfig4(config, t.getTarget()) + } + } + return nil + default: + return nil + } +} + +func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { + if ParserATNSimulatorDebug { + fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) + } + return NewBaseATNConfig4(config, t.getTarget()) +} + +func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, + pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { + + if ParserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + + strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") + if p.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *BaseATNConfig + if collectPredicates && inContext { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. 
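+ // The predicate must observe the input as it stood when prediction
+ // began: save the cursor, rewind to startIndex, evaluate, then restore.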
+ currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewBaseATNConfig4(config, pt.getTarget()) + } + if ParserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) + } + return c +} + +func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { + + if ParserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + + ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) + if p.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *BaseATNConfig + if collectPredicates && (!pt.isCtxDependent || inContext) { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. + currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewBaseATNConfig4(config, pt.getTarget()) + } + if ParserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) + } + return c +} + +func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { + if ParserATNSimulatorDebug { + fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) + } + returnState := t.followState + newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) + return NewBaseATNConfig1(config, t.getTarget(), newContext) +} + +func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModeGetAlts(altsets) +} + +// Sam pointed out a problem with the previous definition, v3, of +// ambiguous states. If we have another state associated with conflicting +// alternatives, we should keep going. For example, the following grammar +// +// s : (ID | ID ID?) '' +// +// When the ATN simulation reaches the state before '', it has a DFA +// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally +// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node +// because alternative to has another way to continue, via [6|2|[]]. +// The key is that we have a single state that has config's only associated +// with a single alternative, 2, and crucially the state transitions +// among the configurations are all non-epsilon transitions. That means +// we don't consider any conflicts that include alternative 2. So, we +// ignore the conflict between alts 1 and 2. 
We ignore a set of +// conflicting alts when there is an intersection with an alternative +// associated with a single alt state in the state&rarrconfig-list map. +// +// It's also the case that we might have two conflicting configurations but +// also a 3rd nonconflicting configuration for a different alternative: +// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: +// +// a : A | A | A B +// +// After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state right before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue and so we do not +// stop working on p state. In the previous example, we're concerned +// with states associated with the conflicting alternatives. Here alt +// 3 is not associated with the conflicting configs, but since we can continue +// looking for input reasonably, I don't declare the state done. We +// ignore a set of conflicting alts when we have an alternative +// that we still need to pursue. +// + +func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { + var conflictingAlts *BitSet + if configs.GetUniqueAlt() != ATNInvalidAltNumber { + conflictingAlts = NewBitSet() + conflictingAlts.add(configs.GetUniqueAlt()) + } else { + conflictingAlts = configs.GetConflictingAlts() + } + return conflictingAlts +} + +func (p *ParserATNSimulator) GetTokenName(t int) string { + if t == TokenEOF { + return "EOF" + } + + if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) { + return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" + } + + if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetSymbolicNames()) { + return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">" + } + + return strconv.Itoa(t) +} + +func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { + return p.GetTokenName(input.LA(1)) +} + +// Used for debugging in AdaptivePredict around execATN but I cut +// +// it out for clarity now that alg. works well. We can leave p +// "dead" code for a bit. +func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { + + panic("Not implemented") + + // fmt.Println("dead end configs: ") + // var decs = nvae.deadEndConfigs + // + // for i:=0; i0) { + // var t = c.state.GetTransitions()[0] + // if t2, ok := t.(*AtomTransition); ok { + // trans = "Atom "+ p.GetTokenName(t2.label) + // } else if t3, ok := t.(SetTransition); ok { + // _, ok := t.(*NotSetTransition) + // + // var s string + // if (ok){ + // s = "~" + // } + // + // trans = s + "Set " + t3.set + // } + // } + // fmt.Errorf(c.String(p.parser, true) + ":" + trans) + // } +} + +func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { + return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) +} + +func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { + alt := ATNInvalidAltNumber + for _, c := range configs.GetItems() { + if alt == ATNInvalidAltNumber { + alt = c.GetAlt() // found first alt + } else if c.GetAlt() != alt { + return ATNInvalidAltNumber + } + } + return alt +} + +// Add an edge to the DFA, if possible. This method calls +// {@link //addDFAState} to ensure the {@code to} state is present in the +// DFA. 
+// If {@code from} is {@code nil}, or if {@code t} is outside the
+// range of edges that can be represented in the DFA tables, this method
+// returns without adding the edge to the DFA.
+//
+// If {@code to} is {@code nil}, this method returns {@code nil}.
+// Otherwise, this method returns the {@link DFAState} returned by calling
+// {@link //addDFAState} for the {@code to} state.

+// +// @param dfa The DFA +// @param from The source state for the edge +// @param t The input symbol +// @param to The target state for the edge +// +// @return If {@code to} is {@code nil}, p method returns {@code nil} +// otherwise p method returns the result of calling {@link //addDFAState} +// on {@code to} +func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { + if ParserATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) + } + if to == nil { + return nil + } + p.atn.stateMu.Lock() + to = p.addDFAState(dfa, to) // used existing if possible not incoming + p.atn.stateMu.Unlock() + if from == nil || t < -1 || t > p.atn.maxTokenType { + return to + } + p.atn.edgeMu.Lock() + if from.getEdges() == nil { + from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1)) + } + from.setIthEdge(t+1, to) // connect + p.atn.edgeMu.Unlock() + + if ParserATNSimulatorDebug { + var names []string + if p.parser != nil { + names = p.parser.GetLiteralNames() + } + + fmt.Println("DFA=\n" + dfa.String(names, nil)) + } + return to +} + +// Add state {@code D} to the DFA if it is not already present, and return +// the actual instance stored in the DFA. If a state equivalent to {@code D} +// is already in the DFA, the existing state is returned. Otherwise p +// method returns {@code D} after adding it to the DFA. +// +//

+// If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and
+// does not change the DFA.

+// +// @param dfa The dfa +// @param D The DFA state to add +// @return The state stored in the DFA. This will be either the existing +// state if {@code D} is already in the DFA, or {@code D} itself if the +// state was not already present. +func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { + if d == ATNSimulatorError { + return d + } + existing, present := dfa.states.Get(d) + if present { + if ParserATNSimulatorTraceATNSim { + fmt.Print("addDFAState " + d.String() + " exists") + } + return existing + } + + // The state was not present, so update it with configs + // + d.stateNumber = dfa.states.Len() + if !d.configs.ReadOnly() { + d.configs.OptimizeConfigs(p.BaseATNSimulator) + d.configs.SetReadOnly(true) + } + dfa.states.Put(d) + if ParserATNSimulatorTraceATNSim { + fmt.Println("addDFAState new " + d.String()) + } + + return d +} + +func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) + } +} + +// If context sensitive parsing, we know it's ambiguity not conflict// +func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, + exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go index 1c8cee7479..1d26d239bd 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/parser_rule_context.go @@ -1,362 +1,362 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
- -package antlr - -import ( - "reflect" - "strconv" -) - -type ParserRuleContext interface { - RuleContext - - SetException(RecognitionException) - - AddTokenNode(token Token) *TerminalNodeImpl - AddErrorNode(badToken Token) *ErrorNodeImpl - - EnterRule(listener ParseTreeListener) - ExitRule(listener ParseTreeListener) - - SetStart(Token) - GetStart() Token - - SetStop(Token) - GetStop() Token - - AddChild(child RuleContext) RuleContext - RemoveLastChild() -} - -type BaseParserRuleContext struct { - *BaseRuleContext - - start, stop Token - exception RecognitionException - children []Tree -} - -func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { - prc := new(BaseParserRuleContext) - - prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) - - prc.RuleIndex = -1 - // * If we are debugging or building a parse tree for a Visitor, - // we need to track all of the tokens and rule invocations associated - // with prc rule's context. This is empty for parsing w/o tree constr. - // operation because we don't the need to track the details about - // how we parse prc rule. - // / - prc.children = nil - prc.start = nil - prc.stop = nil - // The exception that forced prc rule to return. If the rule successfully - // completed, prc is {@code nil}. - prc.exception = nil - - return prc -} - -func (prc *BaseParserRuleContext) SetException(e RecognitionException) { - prc.exception = e -} - -func (prc *BaseParserRuleContext) GetChildren() []Tree { - return prc.children -} - -func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) { - // from RuleContext - prc.parentCtx = ctx.parentCtx - prc.invokingState = ctx.invokingState - prc.children = nil - prc.start = ctx.start - prc.stop = ctx.stop -} - -func (prc *BaseParserRuleContext) GetText() string { - if prc.GetChildCount() == 0 { - return "" - } - - var s string - for _, child := range prc.children { - s += child.(ParseTree).GetText() - } - - return s -} - -// Double dispatch methods for listeners -func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { -} - -func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { -} - -// * Does not set parent link other add methods do that/// -func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { - if prc.children == nil { - prc.children = make([]Tree, 0) - } - if child == nil { - panic("Child may not be null") - } - prc.children = append(prc.children, child) - return child -} - -func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { - if prc.children == nil { - prc.children = make([]Tree, 0) - } - if child == nil { - panic("Child may not be null") - } - prc.children = append(prc.children, child) - return child -} - -// * Used by EnterOuterAlt to toss out a RuleContext previously added as -// we entered a rule. If we have // label, we will need to remove -// generic ruleContext object. 
-// / -func (prc *BaseParserRuleContext) RemoveLastChild() { - if prc.children != nil && len(prc.children) > 0 { - prc.children = prc.children[0 : len(prc.children)-1] - } -} - -func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { - - node := NewTerminalNodeImpl(token) - prc.addTerminalNodeChild(node) - node.parentCtx = prc - return node - -} - -func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { - node := NewErrorNodeImpl(badToken) - prc.addTerminalNodeChild(node) - node.parentCtx = prc - return node -} - -func (prc *BaseParserRuleContext) GetChild(i int) Tree { - if prc.children != nil && len(prc.children) >= i { - return prc.children[i] - } - - return nil -} - -func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext { - if childType == nil { - return prc.GetChild(i).(RuleContext) - } - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if reflect.TypeOf(child) == childType { - if i == 0 { - return child.(RuleContext) - } - - i-- - } - } - - return nil -} - -func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string { - return TreesStringTree(prc, ruleNames, recog) -} - -func (prc *BaseParserRuleContext) GetRuleContext() RuleContext { - return prc -} - -func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} { - return visitor.VisitChildren(prc) -} - -func (prc *BaseParserRuleContext) SetStart(t Token) { - prc.start = t -} - -func (prc *BaseParserRuleContext) GetStart() Token { - return prc.start -} - -func (prc *BaseParserRuleContext) SetStop(t Token) { - prc.stop = t -} - -func (prc *BaseParserRuleContext) GetStop() Token { - return prc.stop -} - -func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if c2, ok := child.(TerminalNode); ok { - if c2.GetSymbol().GetTokenType() == ttype { - if i == 0 { - return c2 - } - - i-- - } - } - } - return nil -} - -func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { - if prc.children == nil { - return make([]TerminalNode, 0) - } - - tokens := make([]TerminalNode, 0) - - for j := 0; j < len(prc.children); j++ { - child := prc.children[j] - if tchild, ok := child.(TerminalNode); ok { - if tchild.GetSymbol().GetTokenType() == ttype { - tokens = append(tokens, tchild) - } - } - } - - return tokens -} - -func (prc *BaseParserRuleContext) GetPayload() interface{} { - return prc -} - -func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext { - if prc.children == nil || i < 0 || i >= len(prc.children) { - return nil - } - - j := -1 // what element have we found with ctxType? 
- for _, o := range prc.children { - - childType := reflect.TypeOf(o) - - if childType.Implements(ctxType) { - j++ - if j == i { - return o.(RuleContext) - } - } - } - return nil -} - -// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do -// check for convertibility - -func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext { - return prc.getChild(ctxType, i) -} - -func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext { - if prc.children == nil { - return make([]RuleContext, 0) - } - - contexts := make([]RuleContext, 0) - - for _, child := range prc.children { - childType := reflect.TypeOf(child) - - if childType.ConvertibleTo(ctxType) { - contexts = append(contexts, child.(RuleContext)) - } - } - return contexts -} - -func (prc *BaseParserRuleContext) GetChildCount() int { - if prc.children == nil { - return 0 - } - - return len(prc.children) -} - -func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { - if prc.start == nil || prc.stop == nil { - return TreeInvalidInterval - } - - return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex()) -} - -//need to manage circular dependencies, so export now - -// Print out a whole tree, not just a node, in LISP format -// (root child1 .. childN). Print just a node if b is a leaf. -// - -func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string { - - var p ParserRuleContext = prc - s := "[" - for p != nil && p != stop { - if ruleNames == nil { - if !p.IsEmpty() { - s += strconv.Itoa(p.GetInvokingState()) - } - } else { - ri := p.GetRuleIndex() - var ruleName string - if ri >= 0 && ri < len(ruleNames) { - ruleName = ruleNames[ri] - } else { - ruleName = strconv.Itoa(ri) - } - s += ruleName - } - if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) { - s += " " - } - pi := p.GetParent() - if pi != nil { - p = pi.(ParserRuleContext) - } else { - p = nil - } - } - s += "]" - return s -} - -var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1) - -type InterpreterRuleContext interface { - ParserRuleContext -} - -type BaseInterpreterRuleContext struct { - *BaseParserRuleContext -} - -func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { - - prc := new(BaseInterpreterRuleContext) - - prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber) - - prc.RuleIndex = ruleIndex - - return prc -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
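Worth a note: `GetTypedRuleContexts` in this file filters children by dynamic type with `reflect.TypeOf` and `ConvertibleTo`, since the runtime predates generics. Below is a self-contained sketch of that filtering idea; `node`, `exprNode`, `termNode`, and `childrenOfType` are hypothetical names used only for illustration.

```go
package main

import (
	"fmt"
	"reflect"
)

// node is a hypothetical parse-tree child interface for this sketch.
type node interface{ label() string }

type exprNode struct{ s string }
type termNode struct{ s string }

func (e *exprNode) label() string { return e.s }
func (t *termNode) label() string { return t.s }

// childrenOfType keeps the children whose dynamic type is convertible
// to want, mirroring the ConvertibleTo filter in GetTypedRuleContexts.
func childrenOfType(children []node, want reflect.Type) []node {
	var out []node
	for _, c := range children {
		if reflect.TypeOf(c).ConvertibleTo(want) {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	kids := []node{&exprNode{"e1"}, &termNode{"t1"}, &exprNode{"e2"}}
	for _, c := range childrenOfType(kids, reflect.TypeOf(&exprNode{})) {
		fmt.Println(c.label()) // prints e1 then e2
	}
}
```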
+ +package antlr + +import ( + "reflect" + "strconv" +) + +type ParserRuleContext interface { + RuleContext + + SetException(RecognitionException) + + AddTokenNode(token Token) *TerminalNodeImpl + AddErrorNode(badToken Token) *ErrorNodeImpl + + EnterRule(listener ParseTreeListener) + ExitRule(listener ParseTreeListener) + + SetStart(Token) + GetStart() Token + + SetStop(Token) + GetStop() Token + + AddChild(child RuleContext) RuleContext + RemoveLastChild() +} + +type BaseParserRuleContext struct { + *BaseRuleContext + + start, stop Token + exception RecognitionException + children []Tree +} + +func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { + prc := new(BaseParserRuleContext) + + prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) + + prc.RuleIndex = -1 + // * If we are debugging or building a parse tree for a Visitor, + // we need to track all of the tokens and rule invocations associated + // with prc rule's context. This is empty for parsing w/o tree constr. + // operation because we don't the need to track the details about + // how we parse prc rule. + // / + prc.children = nil + prc.start = nil + prc.stop = nil + // The exception that forced prc rule to return. If the rule successfully + // completed, prc is {@code nil}. + prc.exception = nil + + return prc +} + +func (prc *BaseParserRuleContext) SetException(e RecognitionException) { + prc.exception = e +} + +func (prc *BaseParserRuleContext) GetChildren() []Tree { + return prc.children +} + +func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) { + // from RuleContext + prc.parentCtx = ctx.parentCtx + prc.invokingState = ctx.invokingState + prc.children = nil + prc.start = ctx.start + prc.stop = ctx.stop +} + +func (prc *BaseParserRuleContext) GetText() string { + if prc.GetChildCount() == 0 { + return "" + } + + var s string + for _, child := range prc.children { + s += child.(ParseTree).GetText() + } + + return s +} + +// Double dispatch methods for listeners +func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { +} + +func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { +} + +// * Does not set parent link other add methods do that/// +func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { + if prc.children == nil { + prc.children = make([]Tree, 0) + } + if child == nil { + panic("Child may not be null") + } + prc.children = append(prc.children, child) + return child +} + +func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { + if prc.children == nil { + prc.children = make([]Tree, 0) + } + if child == nil { + panic("Child may not be null") + } + prc.children = append(prc.children, child) + return child +} + +// * Used by EnterOuterAlt to toss out a RuleContext previously added as +// we entered a rule. If we have // label, we will need to remove +// generic ruleContext object. 
+// / +func (prc *BaseParserRuleContext) RemoveLastChild() { + if prc.children != nil && len(prc.children) > 0 { + prc.children = prc.children[0 : len(prc.children)-1] + } +} + +func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { + + node := NewTerminalNodeImpl(token) + prc.addTerminalNodeChild(node) + node.parentCtx = prc + return node + +} + +func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { + node := NewErrorNodeImpl(badToken) + prc.addTerminalNodeChild(node) + node.parentCtx = prc + return node +} + +func (prc *BaseParserRuleContext) GetChild(i int) Tree { + if prc.children != nil && len(prc.children) >= i { + return prc.children[i] + } + + return nil +} + +func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext { + if childType == nil { + return prc.GetChild(i).(RuleContext) + } + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if reflect.TypeOf(child) == childType { + if i == 0 { + return child.(RuleContext) + } + + i-- + } + } + + return nil +} + +func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string { + return TreesStringTree(prc, ruleNames, recog) +} + +func (prc *BaseParserRuleContext) GetRuleContext() RuleContext { + return prc +} + +func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} { + return visitor.VisitChildren(prc) +} + +func (prc *BaseParserRuleContext) SetStart(t Token) { + prc.start = t +} + +func (prc *BaseParserRuleContext) GetStart() Token { + return prc.start +} + +func (prc *BaseParserRuleContext) SetStop(t Token) { + prc.stop = t +} + +func (prc *BaseParserRuleContext) GetStop() Token { + return prc.stop +} + +func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if c2, ok := child.(TerminalNode); ok { + if c2.GetSymbol().GetTokenType() == ttype { + if i == 0 { + return c2 + } + + i-- + } + } + } + return nil +} + +func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { + if prc.children == nil { + return make([]TerminalNode, 0) + } + + tokens := make([]TerminalNode, 0) + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if tchild, ok := child.(TerminalNode); ok { + if tchild.GetSymbol().GetTokenType() == ttype { + tokens = append(tokens, tchild) + } + } + } + + return tokens +} + +func (prc *BaseParserRuleContext) GetPayload() interface{} { + return prc +} + +func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext { + if prc.children == nil || i < 0 || i >= len(prc.children) { + return nil + } + + j := -1 // what element have we found with ctxType? 
+ for _, o := range prc.children { + + childType := reflect.TypeOf(o) + + if childType.Implements(ctxType) { + j++ + if j == i { + return o.(RuleContext) + } + } + } + return nil +} + +// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do +// check for convertibility + +func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext { + return prc.getChild(ctxType, i) +} + +func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext { + if prc.children == nil { + return make([]RuleContext, 0) + } + + contexts := make([]RuleContext, 0) + + for _, child := range prc.children { + childType := reflect.TypeOf(child) + + if childType.ConvertibleTo(ctxType) { + contexts = append(contexts, child.(RuleContext)) + } + } + return contexts +} + +func (prc *BaseParserRuleContext) GetChildCount() int { + if prc.children == nil { + return 0 + } + + return len(prc.children) +} + +func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { + if prc.start == nil || prc.stop == nil { + return TreeInvalidInterval + } + + return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex()) +} + +//need to manage circular dependencies, so export now + +// Print out a whole tree, not just a node, in LISP format +// (root child1 .. childN). Print just a node if b is a leaf. +// + +func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string { + + var p ParserRuleContext = prc + s := "[" + for p != nil && p != stop { + if ruleNames == nil { + if !p.IsEmpty() { + s += strconv.Itoa(p.GetInvokingState()) + } + } else { + ri := p.GetRuleIndex() + var ruleName string + if ri >= 0 && ri < len(ruleNames) { + ruleName = ruleNames[ri] + } else { + ruleName = strconv.Itoa(ri) + } + s += ruleName + } + if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) { + s += " " + } + pi := p.GetParent() + if pi != nil { + p = pi.(ParserRuleContext) + } else { + p = nil + } + } + s += "]" + return s +} + +var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1) + +type InterpreterRuleContext interface { + ParserRuleContext +} + +type BaseInterpreterRuleContext struct { + *BaseParserRuleContext +} + +func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { + + prc := new(BaseInterpreterRuleContext) + + prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber) + + prc.RuleIndex = ruleIndex + + return prc +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go index ba62af3610..d7ff7458a8 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_context.go @@ -1,806 +1,806 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "golang.org/x/exp/slices" - "strconv" -) - -// Represents {@code $} in local context prediction, which means wildcard. -// {@code//+x =//}. -// / -const ( - BasePredictionContextEmptyReturnState = 0x7FFFFFFF -) - -// Represents {@code $} in an array in full context mode, when {@code $} -// doesn't mean wildcard: {@code $ + x = [$,x]}. 
Here, -// {@code $} = {@link //EmptyReturnState}. -// / - -var ( - BasePredictionContextglobalNodeCount = 1 - BasePredictionContextid = BasePredictionContextglobalNodeCount -) - -type PredictionContext interface { - Hash() int - Equals(interface{}) bool - GetParent(int) PredictionContext - getReturnState(int) int - length() int - isEmpty() bool - hasEmptyPath() bool - String() string -} - -type BasePredictionContext struct { - cachedHash int -} - -func NewBasePredictionContext(cachedHash int) *BasePredictionContext { - pc := new(BasePredictionContext) - pc.cachedHash = cachedHash - - return pc -} - -func (b *BasePredictionContext) isEmpty() bool { - return false -} - -func calculateHash(parent PredictionContext, returnState int) int { - h := murmurInit(1) - h = murmurUpdate(h, parent.Hash()) - h = murmurUpdate(h, returnState) - return murmurFinish(h, 2) -} - -var _emptyPredictionContextHash int - -func init() { - _emptyPredictionContextHash = murmurInit(1) - _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) -} - -func calculateEmptyHash() int { - return _emptyPredictionContextHash -} - -// Used to cache {@link BasePredictionContext} objects. Its used for the shared -// context cash associated with contexts in DFA states. This cache -// can be used for both lexers and parsers. - -type PredictionContextCache struct { - cache map[PredictionContext]PredictionContext -} - -func NewPredictionContextCache() *PredictionContextCache { - t := new(PredictionContextCache) - t.cache = make(map[PredictionContext]PredictionContext) - return t -} - -// Add a context to the cache and return it. If the context already exists, -// return that one instead and do not add a Newcontext to the cache. -// Protect shared cache from unsafe thread access. 
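Editorial aside: the comment above describes a put-if-absent contract, where `add` returns the canonical cached instance so callers can discard duplicates. A hedged, generic sketch of that contract follows; the `cache` type is invented for the example, and the real cache keys its map on context objects. Note that, despite the comment, a plain map like this is not goroutine-safe on its own.

```go
package main

import "fmt"

// cache is a hypothetical stand-in keyed on a comparable type parameter.
type cache[K comparable] struct {
	m map[K]K
}

func newCache[K comparable]() *cache[K] {
	return &cache[K]{m: make(map[K]K)}
}

// add stores v only if no equal value is cached yet, and always
// returns the canonical (first-seen) instance.
func (c *cache[K]) add(v K) K {
	if existing, ok := c.m[v]; ok {
		return existing
	}
	c.m[v] = v
	return v
}

func main() {
	c := newCache[string]()
	fmt.Println(c.add("ctx-a")) // stored and returned
	fmt.Println(c.add("ctx-a")) // duplicate: canonical instance returned
	fmt.Println(len(c.m))       // 1
}
```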
-func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { - if ctx == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY - } - existing := p.cache[ctx] - if existing != nil { - return existing - } - p.cache[ctx] = ctx - return ctx -} - -func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { - return p.cache[ctx] -} - -func (p *PredictionContextCache) length() int { - return len(p.cache) -} - -type SingletonPredictionContext interface { - PredictionContext -} - -type BaseSingletonPredictionContext struct { - *BasePredictionContext - - parentCtx PredictionContext - returnState int -} - -func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { - var cachedHash int - if parent != nil { - cachedHash = calculateHash(parent, returnState) - } else { - cachedHash = calculateEmptyHash() - } - - s := new(BaseSingletonPredictionContext) - s.BasePredictionContext = NewBasePredictionContext(cachedHash) - - s.parentCtx = parent - s.returnState = returnState - - return s -} - -func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { - if returnState == BasePredictionContextEmptyReturnState && parent == nil { - // someone can pass in the bits of an array ctx that mean $ - return BasePredictionContextEMPTY - } - - return NewBaseSingletonPredictionContext(parent, returnState) -} - -func (b *BaseSingletonPredictionContext) length() int { - return 1 -} - -func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { - return b.parentCtx -} - -func (b *BaseSingletonPredictionContext) getReturnState(index int) int { - return b.returnState -} - -func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { - return b.returnState == BasePredictionContextEmptyReturnState -} - -func (b *BaseSingletonPredictionContext) Hash() int { - return b.cachedHash -} - -func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool { - if b == other { - return true - } - if _, ok := other.(*BaseSingletonPredictionContext); !ok { - return false - } - - otherP := other.(*BaseSingletonPredictionContext) - - if b.returnState != otherP.getReturnState(0) { - return false - } - if b.parentCtx == nil { - return otherP.parentCtx == nil - } - - return b.parentCtx.Equals(otherP.parentCtx) -} - -func (b *BaseSingletonPredictionContext) String() string { - var up string - - if b.parentCtx == nil { - up = "" - } else { - up = b.parentCtx.String() - } - - if len(up) == 0 { - if b.returnState == BasePredictionContextEmptyReturnState { - return "$" - } - - return strconv.Itoa(b.returnState) - } - - return strconv.Itoa(b.returnState) + " " + up -} - -var BasePredictionContextEMPTY = NewEmptyPredictionContext() - -type EmptyPredictionContext struct { - *BaseSingletonPredictionContext -} - -func NewEmptyPredictionContext() *EmptyPredictionContext { - - p := new(EmptyPredictionContext) - - p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) - p.cachedHash = calculateEmptyHash() - return p -} - -func (e *EmptyPredictionContext) isEmpty() bool { - return true -} - -func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { - return nil -} - -func (e *EmptyPredictionContext) getReturnState(index int) int { - return e.returnState -} - -func (e *EmptyPredictionContext) Hash() int { - return e.cachedHash -} - -func (e *EmptyPredictionContext) Equals(other interface{}) bool { - return 
e == other -} - -func (e *EmptyPredictionContext) String() string { - return "$" -} - -type ArrayPredictionContext struct { - *BasePredictionContext - - parents []PredictionContext - returnStates []int -} - -func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { - // Parent can be nil only if full ctx mode and we make an array - // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using - // nil parent and - // returnState == {@link //EmptyReturnState}. - hash := murmurInit(1) - - for _, parent := range parents { - hash = murmurUpdate(hash, parent.Hash()) - } - - for _, returnState := range returnStates { - hash = murmurUpdate(hash, returnState) - } - - hash = murmurFinish(hash, len(parents)<<1) - - c := new(ArrayPredictionContext) - c.BasePredictionContext = NewBasePredictionContext(hash) - - c.parents = parents - c.returnStates = returnStates - - return c -} - -func (a *ArrayPredictionContext) GetReturnStates() []int { - return a.returnStates -} - -func (a *ArrayPredictionContext) hasEmptyPath() bool { - return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) isEmpty() bool { - // since EmptyReturnState can only appear in the last position, we - // don't need to verify that size==1 - return a.returnStates[0] == BasePredictionContextEmptyReturnState -} - -func (a *ArrayPredictionContext) length() int { - return len(a.returnStates) -} - -func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { - return a.parents[index] -} - -func (a *ArrayPredictionContext) getReturnState(index int) int { - return a.returnStates[index] -} - -// Equals is the default comparison function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Equals(o interface{}) bool { - if a == o { - return true - } - other, ok := o.(*ArrayPredictionContext) - if !ok { - return false - } - if a.cachedHash != other.Hash() { - return false // can't be same if hash is different - } - - // Must compare the actual array elements and not just the array address - // - return slices.Equal(a.returnStates, other.returnStates) && - slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { - return x.Equals(y) - }) -} - -// Hash is the default hash function for ArrayPredictionContext when no specialized -// implementation is needed for a collection -func (a *ArrayPredictionContext) Hash() int { - return a.BasePredictionContext.cachedHash -} - -func (a *ArrayPredictionContext) String() string { - if a.isEmpty() { - return "[]" - } - - s := "[" - for i := 0; i < len(a.returnStates); i++ { - if i > 0 { - s = s + ", " - } - if a.returnStates[i] == BasePredictionContextEmptyReturnState { - s = s + "$" - continue - } - s = s + strconv.Itoa(a.returnStates[i]) - if a.parents[i] != nil { - s = s + " " + a.parents[i].String() - } else { - s = s + "nil" - } - } - - return s + "]" -} - -// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. -// Return {@link //EMPTY} if {@code outerContext} is empty or nil. -// / -func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { - if outerContext == nil { - outerContext = ParserRuleContextEmpty - } - // if we are in RuleContext of start rule, s, then BasePredictionContext - // is EMPTY. Nobody called us. 
(if we are empty, return empty) - if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { - return BasePredictionContextEMPTY - } - // If we have a parent, convert it to a BasePredictionContext graph - parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) - state := a.states[outerContext.GetInvokingState()] - transition := state.GetTransitions()[0] - - return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) -} - -func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - - // Share same graph if both same - // - if a == b || a.Equals(b) { - return a - } - - // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test - // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created - // from it. - // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion - // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from - // either of them. - - ac, ok1 := a.(*BaseSingletonPredictionContext) - bc, ok2 := b.(*BaseSingletonPredictionContext) - - if ok1 && ok2 { - return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) - } - // At least one of a or b is array - // If one is $ and rootIsWildcard, return $ as// wildcard - if rootIsWildcard { - if _, ok := a.(*EmptyPredictionContext); ok { - return a - } - if _, ok := b.(*EmptyPredictionContext); ok { - return b - } - } - - // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters - // here. - // - // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here - - var arp, arb *ArrayPredictionContext - var ok bool - if arp, ok = a.(*ArrayPredictionContext); ok { - } else if _, ok = a.(*BaseSingletonPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) - } else if _, ok = a.(*EmptyPredictionContext); ok { - arp = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - if arb, ok = b.(*ArrayPredictionContext); ok { - } else if _, ok = b.(*BaseSingletonPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) - } else if _, ok = b.(*EmptyPredictionContext); ok { - arb = NewArrayPredictionContext([]PredictionContext{}, []int{}) - } - - // Both arp and arb - return mergeArrays(arp, arb, rootIsWildcard, mergeCache) -} - -// Merge two {@link SingletonBasePredictionContext} instances. -// -//

-// Stack tops equal, parents merge is same: return left graph.
-//
-// Same stack top, parents differ: merge parents giving array node, then
-// remainders of those graphs. A new root node is created to point to the
-// merged parents.
-//
-// Different stack tops pointing to same parent. Make array node for the
-// root where both elements in the root point to the same (original)
-// parent.
-//
-// Different stack tops pointing to different parents. Make array node for
-// the root where each element points to the corresponding original
-// parent.
-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// @param mergeCache -// / -func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.Hash(), b.Hash()) - if previous != nil { - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.Hash(), a.Hash()) - if previous != nil { - return previous.(PredictionContext) - } - } - - rootMerge := mergeRoot(a, b, rootIsWildcard) - if rootMerge != nil { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), rootMerge) - } - return rootMerge - } - if a.returnState == b.returnState { - parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) - // if parent is same as existing a or b parent or reduced to a parent, - // return it - if parent == a.parentCtx { - return a // ax + bx = ax, if a=b - } - if parent == b.parentCtx { - return b // ax + bx = bx, if a=b - } - // else: ax + ay = a'[x,y] - // merge parents x and y, giving array node with x,y then remainders - // of those graphs. dup a, a' points at merged array - // Newjoined parent so create Newsingleton pointing to it, a' - spc := SingletonBasePredictionContextCreate(parent, a.returnState) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), spc) - } - return spc - } - // a != b payloads differ - // see if we can collapse parents due to $+x parents if local ctx - var singleParent PredictionContext - if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + - // bx = - // [a,b]x - singleParent = a.parentCtx - } - if singleParent != nil { // parents are same - // sort payloads and use same parent - payloads := []int{a.returnState, b.returnState} - if a.returnState > b.returnState { - payloads[0] = b.returnState - payloads[1] = a.returnState - } - parents := []PredictionContext{singleParent, singleParent} - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), apc) - } - return apc - } - // parents differ and can't merge them. Just pack together - // into array can't merge. - // ax + by = [ax,by] - payloads := []int{a.returnState, b.returnState} - parents := []PredictionContext{a.parentCtx, b.parentCtx} - if a.returnState > b.returnState { // sort by payload - payloads[0] = b.returnState - payloads[1] = a.returnState - parents = []PredictionContext{b.parentCtx, a.parentCtx} - } - apc := NewArrayPredictionContext(parents, payloads) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), apc) - } - return apc -} - -// Handle case where at least one of {@code a} or {@code b} is -// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used -// to represent {@link //EMPTY}. -// -//

-// Local-Context Merges
-//
-// These local-context merge operations are used when {@code rootIsWildcard}
-// is true.
-//
-// {@link //EMPTY} is superset of any graph: return {@link //EMPTY}.
-//
-// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
-// {@code //EMPTY}: return left graph.
-//
-// Special case of last merge if local context.
-//
-// Full-Context Merges
-//
-// These full-context merge operations are used when {@code rootIsWildcard}
-// is false.
-//
-// Must keep all contexts; {@link //EMPTY} in array is a special value (and
-// nil parent).
-// -// @param a the first {@link SingletonBasePredictionContext} -// @param b the second {@link SingletonBasePredictionContext} -// @param rootIsWildcard {@code true} if this is a local-context merge, -// otherwise false to indicate a full-context merge -// / -func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { - if rootIsWildcard { - if a == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // // + b =// - } - if b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // a +// =// - } - } else { - if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { - return BasePredictionContextEMPTY // $ + $ = $ - } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] - payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{b.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) - payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} - parents := []PredictionContext{a.GetParent(-1), nil} - return NewArrayPredictionContext(parents, payloads) - } - } - return nil -} - -// Merge two {@link ArrayBasePredictionContext} instances. -// -//

-// Different tops, different parents.
-//
-// Shared top, same parents.
-//
-// Shared top, different parents.
-//
-// Shared top, all shared parents.
-//
-// Equal tops, merge parents and reduce top to
-// {@link SingletonBasePredictionContext}.
-// / -func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { - if mergeCache != nil { - previous := mergeCache.Get(a.Hash(), b.Hash()) - if previous != nil { - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") - } - return previous.(PredictionContext) - } - previous = mergeCache.Get(b.Hash(), a.Hash()) - if previous != nil { - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") - } - return previous.(PredictionContext) - } - } - // merge sorted payloads a + b => M - i := 0 // walks a - j := 0 // walks b - k := 0 // walks target M array - - mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) - mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) - // walk and merge to yield mergedParents, mergedReturnStates - for i < len(a.returnStates) && j < len(b.returnStates) { - aParent := a.parents[i] - bParent := b.parents[j] - if a.returnStates[i] == b.returnStates[j] { - // same payload (stack tops are equal), must yield merged singleton - payload := a.returnStates[i] - // $+$ = $ - bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil - axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax - // -> - // ax - if bothDollars || axAX { - mergedParents[k] = aParent // choose left - mergedReturnStates[k] = payload - } else { // ax+ay -> a'[x,y] - mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) - mergedParents[k] = mergedParent - mergedReturnStates[k] = payload - } - i++ // hop over left one as usual - j++ // but also Skip one in right side since we merge - } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M - mergedParents[k] = aParent - mergedReturnStates[k] = a.returnStates[i] - i++ - } else { // b > a, copy b[j] to M - mergedParents[k] = bParent - mergedReturnStates[k] = b.returnStates[j] - j++ - } - k++ - } - // copy over any payloads remaining in either array - if i < len(a.returnStates) { - for p := i; p < len(a.returnStates); p++ { - mergedParents[k] = a.parents[p] - mergedReturnStates[k] = a.returnStates[p] - k++ - } - } else { - for p := j; p < len(b.returnStates); p++ { - mergedParents[k] = b.parents[p] - mergedReturnStates[k] = b.returnStates[p] - k++ - } - } - // trim merged if we combined a few that had same stack tops - if k < len(mergedParents) { // write index < last position trim - if k == 1 { // for just one merged element, return singleton top - pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), pc) - } - return pc - } - mergedParents = mergedParents[0:k] - mergedReturnStates = mergedReturnStates[0:k] - } - - M := NewArrayPredictionContext(mergedParents, mergedReturnStates) - - // if we created same array as a or b, return that instead - // TODO: track whether this is possible above during merge sort for speed - // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. 
This could be causing allocation problems - if M == a { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), a) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") - } - return a - } - if M == b { - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), b) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") - } - return b - } - combineCommonParents(mergedParents) - - if mergeCache != nil { - mergeCache.set(a.Hash(), b.Hash(), M) - } - if ParserATNSimulatorTraceATNSim { - fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String()) - } - return M -} - -// Make pass over all M {@code parents} merge any {@code equals()} -// ones. -// / -func combineCommonParents(parents []PredictionContext) { - uniqueParents := make(map[PredictionContext]PredictionContext) - - for p := 0; p < len(parents); p++ { - parent := parents[p] - if uniqueParents[parent] == nil { - uniqueParents[parent] = parent - } - } - for q := 0; q < len(parents); q++ { - parents[q] = uniqueParents[parents[q]] - } -} - -func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { - - if context.isEmpty() { - return context - } - existing := visited[context] - if existing != nil { - return existing - } - existing = contextCache.Get(context) - if existing != nil { - visited[context] = existing - return existing - } - changed := false - parents := make([]PredictionContext, context.length()) - for i := 0; i < len(parents); i++ { - parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) - if changed || parent != context.GetParent(i) { - if !changed { - parents = make([]PredictionContext, context.length()) - for j := 0; j < context.length(); j++ { - parents[j] = context.GetParent(j) - } - changed = true - } - parents[i] = parent - } - } - if !changed { - contextCache.add(context) - visited[context] = context - return context - } - var updated PredictionContext - if len(parents) == 0 { - updated = BasePredictionContextEMPTY - } else if len(parents) == 1 { - updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) - } else { - updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) - } - contextCache.add(updated) - visited[updated] = updated - visited[context] = updated - - return updated -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "golang.org/x/exp/slices" + "strconv" +) + +// Represents {@code $} in local context prediction, which means wildcard. +// {@code//+x =//}. +// / +const ( + BasePredictionContextEmptyReturnState = 0x7FFFFFFF +) + +// Represents {@code $} in an array in full context mode, when {@code $} +// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, +// {@code $} = {@link //EmptyReturnState}. 
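One detail worth calling out here: `BasePredictionContextEmptyReturnState` is `0x7FFFFFFF`, which is `math.MaxInt32`, and the array-merge code keeps return states sorted, so the `$` sentinel always lands in the last position. That ordering is exactly what `isEmpty` and `hasEmptyPath` rely on. A tiny demonstration of the property, as a sketch rather than runtime code:

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// emptyReturnState mirrors BasePredictionContextEmptyReturnState.
const emptyReturnState = math.MaxInt32 // 0x7FFFFFFF

func main() {
	// Return states are kept sorted during merges; the max-int "$"
	// sentinel therefore always ends up in the last position.
	returnStates := []int{42, emptyReturnState, 7}
	sort.Ints(returnStates)
	fmt.Println(returnStates) // [7 42 2147483647]
}
```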
+// / + +var ( + BasePredictionContextglobalNodeCount = 1 + BasePredictionContextid = BasePredictionContextglobalNodeCount +) + +type PredictionContext interface { + Hash() int + Equals(interface{}) bool + GetParent(int) PredictionContext + getReturnState(int) int + length() int + isEmpty() bool + hasEmptyPath() bool + String() string +} + +type BasePredictionContext struct { + cachedHash int +} + +func NewBasePredictionContext(cachedHash int) *BasePredictionContext { + pc := new(BasePredictionContext) + pc.cachedHash = cachedHash + + return pc +} + +func (b *BasePredictionContext) isEmpty() bool { + return false +} + +func calculateHash(parent PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.Hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +var _emptyPredictionContextHash int + +func init() { + _emptyPredictionContextHash = murmurInit(1) + _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0) +} + +func calculateEmptyHash() int { + return _emptyPredictionContextHash +} + +// Used to cache {@link BasePredictionContext} objects. Its used for the shared +// context cash associated with contexts in DFA states. This cache +// can be used for both lexers and parsers. + +type PredictionContextCache struct { + cache map[PredictionContext]PredictionContext +} + +func NewPredictionContextCache() *PredictionContextCache { + t := new(PredictionContextCache) + t.cache = make(map[PredictionContext]PredictionContext) + return t +} + +// Add a context to the cache and return it. If the context already exists, +// return that one instead and do not add a Newcontext to the cache. +// Protect shared cache from unsafe thread access. +func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { + if ctx == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY + } + existing := p.cache[ctx] + if existing != nil { + return existing + } + p.cache[ctx] = ctx + return ctx +} + +func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { + return p.cache[ctx] +} + +func (p *PredictionContextCache) length() int { + return len(p.cache) +} + +type SingletonPredictionContext interface { + PredictionContext +} + +type BaseSingletonPredictionContext struct { + *BasePredictionContext + + parentCtx PredictionContext + returnState int +} + +func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { + var cachedHash int + if parent != nil { + cachedHash = calculateHash(parent, returnState) + } else { + cachedHash = calculateEmptyHash() + } + + s := new(BaseSingletonPredictionContext) + s.BasePredictionContext = NewBasePredictionContext(cachedHash) + + s.parentCtx = parent + s.returnState = returnState + + return s +} + +func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func (b *BaseSingletonPredictionContext) length() int { + return 1 +} + +func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { + return b.parentCtx +} + +func (b *BaseSingletonPredictionContext) getReturnState(index int) int { + return b.returnState +} + +func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { + return 
b.returnState == BasePredictionContextEmptyReturnState +} + +func (b *BaseSingletonPredictionContext) Hash() int { + return b.cachedHash +} + +func (b *BaseSingletonPredictionContext) Equals(other interface{}) bool { + if b == other { + return true + } + if _, ok := other.(*BaseSingletonPredictionContext); !ok { + return false + } + + otherP := other.(*BaseSingletonPredictionContext) + + if b.returnState != otherP.getReturnState(0) { + return false + } + if b.parentCtx == nil { + return otherP.parentCtx == nil + } + + return b.parentCtx.Equals(otherP.parentCtx) +} + +func (b *BaseSingletonPredictionContext) String() string { + var up string + + if b.parentCtx == nil { + up = "" + } else { + up = b.parentCtx.String() + } + + if len(up) == 0 { + if b.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(b.returnState) + } + + return strconv.Itoa(b.returnState) + " " + up +} + +var BasePredictionContextEMPTY = NewEmptyPredictionContext() + +type EmptyPredictionContext struct { + *BaseSingletonPredictionContext +} + +func NewEmptyPredictionContext() *EmptyPredictionContext { + + p := new(EmptyPredictionContext) + + p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) + p.cachedHash = calculateEmptyHash() + return p +} + +func (e *EmptyPredictionContext) isEmpty() bool { + return true +} + +func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { + return nil +} + +func (e *EmptyPredictionContext) getReturnState(index int) int { + return e.returnState +} + +func (e *EmptyPredictionContext) Hash() int { + return e.cachedHash +} + +func (e *EmptyPredictionContext) Equals(other interface{}) bool { + return e == other +} + +func (e *EmptyPredictionContext) String() string { + return "$" +} + +type ArrayPredictionContext struct { + *BasePredictionContext + + parents []PredictionContext + returnStates []int +} + +func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. 
+ hash := murmurInit(1) + + for _, parent := range parents { + hash = murmurUpdate(hash, parent.Hash()) + } + + for _, returnState := range returnStates { + hash = murmurUpdate(hash, returnState) + } + + hash = murmurFinish(hash, len(parents)<<1) + + c := new(ArrayPredictionContext) + c.BasePredictionContext = NewBasePredictionContext(hash) + + c.parents = parents + c.returnStates = returnStates + + return c +} + +func (a *ArrayPredictionContext) GetReturnStates() []int { + return a.returnStates +} + +func (a *ArrayPredictionContext) hasEmptyPath() bool { + return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState +} + +func (a *ArrayPredictionContext) isEmpty() bool { + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return a.returnStates[0] == BasePredictionContextEmptyReturnState +} + +func (a *ArrayPredictionContext) length() int { + return len(a.returnStates) +} + +func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { + return a.parents[index] +} + +func (a *ArrayPredictionContext) getReturnState(index int) int { + return a.returnStates[index] +} + +// Equals is the default comparison function for ArrayPredictionContext when no specialized +// implementation is needed for a collection +func (a *ArrayPredictionContext) Equals(o interface{}) bool { + if a == o { + return true + } + other, ok := o.(*ArrayPredictionContext) + if !ok { + return false + } + if a.cachedHash != other.Hash() { + return false // can't be same if hash is different + } + + // Must compare the actual array elements and not just the array address + // + return slices.Equal(a.returnStates, other.returnStates) && + slices.EqualFunc(a.parents, other.parents, func(x, y PredictionContext) bool { + return x.Equals(y) + }) +} + +// Hash is the default hash function for ArrayPredictionContext when no specialized +// implementation is needed for a collection +func (a *ArrayPredictionContext) Hash() int { + return a.BasePredictionContext.cachedHash +} + +func (a *ArrayPredictionContext) String() string { + if a.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < len(a.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if a.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(a.returnStates[i]) + if a.parents[i] != nil { + s = s + " " + a.parents[i].String() + } else { + s = s + "nil" + } + } + + return s + "]" +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. 
(if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + + // Share same graph if both same + // + if a == b || a.Equals(b) { + return a + } + + // In Java, EmptyPredictionContext inherits from SingletonPredictionContext, and so the test + // in java for SingletonPredictionContext will succeed and a new ArrayPredictionContext will be created + // from it. + // In go, EmptyPredictionContext does not equate to SingletonPredictionContext and so that conversion + // will fail. We need to test for both Empty and Singleton and create an ArrayPredictionContext from + // either of them. + + ac, ok1 := a.(*BaseSingletonPredictionContext) + bc, ok2 := b.(*BaseSingletonPredictionContext) + + if ok1 && ok2 { + return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as// wildcard + if rootIsWildcard { + if _, ok := a.(*EmptyPredictionContext); ok { + return a + } + if _, ok := b.(*EmptyPredictionContext); ok { + return b + } + } + + // Convert Singleton or Empty so both are arrays to normalize - We should not use the existing parameters + // here. + // + // TODO: I think that maybe the Prediction Context structs should be redone as there is a chance we will see this mess again - maybe redo the logic here + + var arp, arb *ArrayPredictionContext + var ok bool + if arp, ok = a.(*ArrayPredictionContext); ok { + } else if _, ok = a.(*BaseSingletonPredictionContext); ok { + arp = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) + } else if _, ok = a.(*EmptyPredictionContext); ok { + arp = NewArrayPredictionContext([]PredictionContext{}, []int{}) + } + + if arb, ok = b.(*ArrayPredictionContext); ok { + } else if _, ok = b.(*BaseSingletonPredictionContext); ok { + arb = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) + } else if _, ok = b.(*EmptyPredictionContext); ok { + arb = NewArrayPredictionContext([]PredictionContext{}, []int{}) + } + + // Both arp and arb + return mergeArrays(arp, arb, rootIsWildcard, mergeCache) +} + +// Merge two {@link SingletonBasePredictionContext} instances. +// +//

+// Stack tops equal, parents merge is same: return left graph.
+//
+// Same stack top, parents differ: merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both elements in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// @param mergeCache +// / +func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + if mergeCache != nil { + previous := mergeCache.Get(a.Hash(), b.Hash()) + if previous != nil { + return previous.(PredictionContext) + } + previous = mergeCache.Get(b.Hash(), a.Hash()) + if previous != nil { + return previous.(PredictionContext) + } + } + + rootMerge := mergeRoot(a, b, rootIsWildcard) + if rootMerge != nil { + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), rootMerge) + } + return rootMerge + } + if a.returnState == b.returnState { + parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) + // if parent is same as existing a or b parent or reduced to a parent, + // return it + if parent == a.parentCtx { + return a // ax + bx = ax, if a=b + } + if parent == b.parentCtx { + return b // ax + bx = bx, if a=b + } + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. dup a, a' points at merged array + // Newjoined parent so create Newsingleton pointing to it, a' + spc := SingletonBasePredictionContextCreate(parent, a.returnState) + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), spc) + } + return spc + } + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + var singleParent PredictionContext + if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + + // bx = + // [a,b]x + singleParent = a.parentCtx + } + if singleParent != nil { // parents are same + // sort payloads and use same parent + payloads := []int{a.returnState, b.returnState} + if a.returnState > b.returnState { + payloads[0] = b.returnState + payloads[1] = a.returnState + } + parents := []PredictionContext{singleParent, singleParent} + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), apc) + } + return apc + } + // parents differ and can't merge them. Just pack together + // into array can't merge. + // ax + by = [ax,by] + payloads := []int{a.returnState, b.returnState} + parents := []PredictionContext{a.parentCtx, b.parentCtx} + if a.returnState > b.returnState { // sort by payload + payloads[0] = b.returnState + payloads[1] = a.returnState + parents = []PredictionContext{b.parentCtx, a.parentCtx} + } + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), apc) + } + return apc +} + +// Handle case where at least one of {@code a} or {@code b} is +// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used +// to represent {@link //EMPTY}. +// +//

+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is superset of any graph: return {@link //EMPTY}.
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY}: return left graph.
+//
+// Special case of last merge if local context.
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+// Must keep all contexts; {@link //EMPTY} in array is a special value (and
+// nil parent).
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// / +func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { + if rootIsWildcard { + if a == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // // + b =// + } + if b == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // a +// =// + } + } else { + if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // $ + $ = $ + } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] + payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []PredictionContext{b.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) + payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []PredictionContext{a.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } + } + return nil +} + +// Merge two {@link ArrayBasePredictionContext} instances. +// +//

+// Different tops, different parents.
+//
+// Shared top, same parents.
+//
+// Shared top, different parents.
+//
+// Shared top, all shared parents.
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+// / +func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + if mergeCache != nil { + previous := mergeCache.Get(a.Hash(), b.Hash()) + if previous != nil { + if ParserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous.(PredictionContext) + } + previous = mergeCache.Get(b.Hash(), a.Hash()) + if previous != nil { + if ParserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous.(PredictionContext) + } + } + // merge sorted payloads a + b => M + i := 0 // walks a + j := 0 // walks b + k := 0 // walks target M array + + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) + mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) + // walk and merge to yield mergedParents, mergedReturnStates + for i < len(a.returnStates) && j < len(b.returnStates) { + aParent := a.parents[i] + bParent := b.parents[j] + if a.returnStates[i] == b.returnStates[j] { + // same payload (stack tops are equal), must yield merged singleton + payload := a.returnStates[i] + // $+$ = $ + bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil + axAX := aParent != nil && bParent != nil && aParent == bParent // ax+ax + // -> + // ax + if bothDollars || axAX { + mergedParents[k] = aParent // choose left + mergedReturnStates[k] = payload + } else { // ax+ay -> a'[x,y] + mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) + mergedParents[k] = mergedParent + mergedReturnStates[k] = payload + } + i++ // hop over left one as usual + j++ // but also Skip one in right side since we merge + } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M + mergedParents[k] = aParent + mergedReturnStates[k] = a.returnStates[i] + i++ + } else { // b > a, copy b[j] to M + mergedParents[k] = bParent + mergedReturnStates[k] = b.returnStates[j] + j++ + } + k++ + } + // copy over any payloads remaining in either array + if i < len(a.returnStates) { + for p := i; p < len(a.returnStates); p++ { + mergedParents[k] = a.parents[p] + mergedReturnStates[k] = a.returnStates[p] + k++ + } + } else { + for p := j; p < len(b.returnStates); p++ { + mergedParents[k] = b.parents[p] + mergedReturnStates[k] = b.returnStates[p] + k++ + } + } + // trim merged if we combined a few that had same stack tops + if k < len(mergedParents) { // write index < last position trim + if k == 1 { // for just one merged element, return singleton top + pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), pc) + } + return pc + } + mergedParents = mergedParents[0:k] + mergedReturnStates = mergedReturnStates[0:k] + } + + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) + + // if we created same array as a or b, return that instead + // TODO: track whether this is possible above during merge sort for speed + // TODO: In go, I do not think we can just do M == xx as M is a brand new allocation. 
This could be causing allocation problems + if M == a { + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), a) + } + if ParserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") + } + return a + } + if M == b { + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), b) + } + if ParserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") + } + return b + } + combineCommonParents(mergedParents) + + if mergeCache != nil { + mergeCache.set(a.Hash(), b.Hash(), M) + } + if ParserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String()) + } + return M +} + +// Make pass over all M {@code parents} merge any {@code equals()} +// ones. +// / +func combineCommonParents(parents []PredictionContext) { + uniqueParents := make(map[PredictionContext]PredictionContext) + + for p := 0; p < len(parents); p++ { + parent := parents[p] + if uniqueParents[parent] == nil { + uniqueParents[parent] = parent + } + } + for q := 0; q < len(parents); q++ { + parents[q] = uniqueParents[parents[q]] + } +} + +func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { + + if context.isEmpty() { + return context + } + existing := visited[context] + if existing != nil { + return existing + } + existing = contextCache.Get(context) + if existing != nil { + visited[context] = existing + return existing + } + changed := false + parents := make([]PredictionContext, context.length()) + for i := 0; i < len(parents); i++ { + parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) + if changed || parent != context.GetParent(i) { + if !changed { + parents = make([]PredictionContext, context.length()) + for j := 0; j < context.length(); j++ { + parents[j] = context.GetParent(j) + } + changed = true + } + parents[i] = parent + } + } + if !changed { + contextCache.add(context) + visited[context] = context + return context + } + var updated PredictionContext + if len(parents) == 0 { + updated = BasePredictionContextEMPTY + } else if len(parents) == 1 { + updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) + } else { + updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) + } + contextCache.add(updated) + visited[updated] = updated + visited[context] = updated + + return updated +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go index 7b9b72fab1..87904bd8ab 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/prediction_mode.go @@ -1,529 +1,529 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// This enumeration defines the prediction modes available in ANTLR 4 along with -// utility methods for analyzing configuration sets for conflicts and/or -// ambiguities. - -const ( - // - // The SLL(*) prediction mode. This prediction mode ignores the current - // parser context when making predictions. This is the fastest prediction - // mode, and provides correct results for many grammars. 
This prediction - // mode is more powerful than the prediction mode provided by ANTLR 3, but - // may result in syntax errors for grammar and input combinations which are - // not SLL. - // - //

- // When using this prediction mode, the parser will either return a correct - // parse tree (i.e. the same parse tree that would be returned with the - // {@link //LL} prediction mode), or it will Report a syntax error. If a - // syntax error is encountered when using the {@link //SLL} prediction mode, - // it may be due to either an actual syntax error in the input or indicate - // that the particular combination of grammar and input requires the more - // powerful {@link //LL} prediction abilities to complete successfully.

- // - //

- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.

- // - PredictionModeSLL = 0 - // - // The LL(*) prediction mode. This prediction mode allows the current parser - // context to be used for resolving SLL conflicts that occur during - // prediction. This is the fastest prediction mode that guarantees correct - // parse results for all combinations of grammars with syntactically correct - // inputs. - // - //

- // When using this prediction mode, the parser will make correct decisions - // for all syntactically-correct grammar and input combinations. However, in - // cases where the grammar is truly ambiguous this prediction mode might not - // Report a precise answer for exactly which alternatives are - // ambiguous.

- // - //

- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.

- // - PredictionModeLL = 1 - // - // The LL(*) prediction mode with exact ambiguity detection. In addition to - // the correctness guarantees provided by the {@link //LL} prediction mode, - // this prediction mode instructs the prediction algorithm to determine the - // complete and exact set of ambiguous alternatives for every ambiguous - // decision encountered while parsing. - // - //

- // This prediction mode may be used for diagnosing ambiguities during - // grammar development. Due to the performance overhead of calculating sets - // of ambiguous alternatives, this prediction mode should be avoided when - // the exact results are not necessary.

- // - //

- // This prediction mode does not provide any guarantees for prediction - // behavior for syntactically-incorrect inputs.

- // - PredictionModeLLExactAmbigDetection = 2 -) - -// Computes the SLL prediction termination condition. -// -//

-// This method computes the SLL prediction termination condition for both of -// the following cases.

    -// • The usual SLL+LL fallback upon SLL conflict
    -// • Pure SLL without LL fallback
    -//
    -// COMBINED SLL+LL PARSING
    -//
    -// When LL-fallback is enabled upon SLL conflict, correct predictions are
    -// ensured regardless of how the termination condition is computed by this
    -// method. Due to the substantially higher cost of LL prediction, the
    -// prediction should only fall back to LL when the additional lookahead
    -// cannot lead to a unique SLL prediction.
    -//
    -// Assuming combined SLL+LL parsing, an SLL configuration set with only
    -// conflicting subsets should fall back to full LL, even if the
    -// configuration sets don't resolve to the same alternative (e.g.
    -// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
    -// configuration, SLL could continue with the hopes that more lookahead will
    -// resolve via one of those non-conflicting configurations.
    -//
    -// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
    -// stops when it sees only conflicting configuration subsets. In contrast,
    -// full LL keeps going when there is uncertainty.
    -//
    -// HEURISTIC
    -//
    -// As a heuristic, we stop prediction when we see any conflicting subset
    -// unless we see a state that only has one alternative associated with it.
    -// The single-alt-state thing lets prediction continue upon rules like
    -// (otherwise, it would admit defeat too soon):
    -//
    -// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }
    -//
    -// When the ATN simulation reaches the state before {@code ';'}, it has a
    -// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
    -// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
    -// processing this node because alternative two has another way to continue,
    -// via {@code [6|2|[]]}.
    -//
    -// It also lets us continue for this rule:
    -//
    -// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
    -//
    -// After Matching input A, we reach the stop state for rule A, state 1.
    -// State 8 is the state right before B. Clearly alternatives 1 and 2
    -// conflict and no amount of further lookahead will separate the two.
    -// However, alternative 3 will be able to continue and so we do not stop
    -// working on this state. In the previous example, we're concerned with
    -// states associated with the conflicting alternatives. Here alt 3 is not
    -// associated with the conflicting configs, but since we can continue
    -// looking for input reasonably, don't declare the state done.
    -//
    -// PURE SLL PARSING
    -//
    -// To handle pure SLL parsing, all we have to do is make sure that we
    -// combine stack contexts for configurations that differ only by semantic
    -// predicate. From there, we can do the usual SLL termination heuristic.
    -//
    -// PREDICATES IN SLL+LL PARSING
    -//
    -// SLL decisions don't evaluate predicates until after they reach DFA stop
    -// states because they need to create the DFA cache that works in all
    -// semantic situations. In contrast, full LL evaluates predicates collected
    -// during start state computation so it can ignore predicates thereafter.
    -// This means that SLL termination detection can totally ignore semantic
    -// predicates.
    -//
    -// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
    -// semantic predicate contexts so we might see two configurations like the
    -// following.
    -//
    -// {@code (s, 1, x, {}), (s, 1, x', {p})}
    -//
    -// Before testing these configurations against others, we have to merge
    -// {@code x} and {@code x'} (without modifying the existing configurations).
    -// For example, we test {@code (x+x')==x''} when looking for conflicts in
    -// the following configurations.
    -//
    -// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
    -//
    -// If the configuration set has predicates (as indicated by
    -// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
    -// the configurations to strip out all of the predicates so that a standard
    -// {@link ATNConfigSet} will merge everything ignoring predicates.
    

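    Editor's note: the SLL-then-LL fallback described in the comment above is normally driven from user code rather than from inside the runtime. Below is a minimal sketch of that two-stage pattern; it assumes a generated grammar, so `NewMyLexer`, `NewMyParser`, and the `StartRule` entry rule are hypothetical names, while the `antlr` calls are the runtime's exported API.

    ```go
    package main

    import (
    	"fmt"

    	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
    )

    // parseTwoStage tries cheap SLL prediction first and retries with full LL
    // only if SLL fails. NewMyLexer, NewMyParser, and StartRule are hypothetical
    // generated names.
    func parseTwoStage(input string) (antlr.ParseTree, error) {
    	parse := func(mode int) (tree antlr.ParseTree, err error) {
    		defer func() {
    			// BailErrorStrategy aborts by panicking on the first syntax error.
    			if r := recover(); r != nil {
    				err = fmt.Errorf("parse failed: %v", r)
    			}
    		}()
    		lexer := NewMyLexer(antlr.NewInputStream(input))
    		parser := NewMyParser(antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel))
    		parser.GetInterpreter().SetPredictionMode(mode)
    		if mode == antlr.PredictionModeSLL {
    			parser.RemoveErrorListeners()
    			parser.SetErrorHandler(antlr.NewBailErrorStrategy())
    		}
    		return parser.StartRule(), nil
    	}
    	if tree, err := parse(antlr.PredictionModeSLL); err == nil {
    		return tree, nil // SLL succeeded; the result is already correct
    	}
    	// An SLL failure may be a real syntax error or an SLL-only weakness, so
    	// decide with the stronger (and slower) full-LL mode.
    	return parse(antlr.PredictionModeLL)
    }
    ```
    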
-func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { - // Configs in rule stop states indicate reaching the end of the decision - // rule (local context) or end of start rule (full context). If all - // configs meet this condition, then none of the configurations is able - // to Match additional input so we terminate prediction. - // - if PredictionModeallConfigsInRuleStopStates(configs) { - return true - } - // pure SLL mode parsing - if mode == PredictionModeSLL { - // Don't bother with combining configs from different semantic - // contexts if we can fail over to full LL costs more time - // since we'll often fail over anyway. - if configs.HasSemanticContext() { - // dup configs, tossing out semantic predicates - dup := NewBaseATNConfigSet(false) - for _, c := range configs.GetItems() { - - // NewBaseATNConfig({semanticContext:}, c) - c = NewBaseATNConfig2(c, SemanticContextNone) - dup.Add(c, nil) - } - configs = dup - } - // now we have combined contexts for configs with dissimilar preds - } - // pure SLL or combined SLL+LL mode parsing - altsets := PredictionModegetConflictingAltSubsets(configs) - return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) -} - -// Checks if any configuration in {@code configs} is in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if any configuration in {@code configs} is in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); ok { - return true - } - } - return false -} - -// Checks if all configurations in {@code configs} are in a -// {@link RuleStopState}. Configurations meeting this condition have reached -// the end of the decision rule (local context) or end of start rule (full -// context). -// -// @param configs the configuration set to test -// @return {@code true} if all configurations in {@code configs} are in a -// {@link RuleStopState}, otherwise {@code false} -func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { - - for _, c := range configs.GetItems() { - if _, ok := c.GetState().(*RuleStopState); !ok { - return false - } - } - return true -} - -// Full LL prediction termination. -// -//

    -// Can we stop looking ahead during ATN simulation or is there some
    -// uncertainty as to which alternative we will ultimately pick, after
    -// consuming more input? Even if there are partial conflicts, we might know
    -// that everything is going to resolve to the same minimum alternative. That
    -// means we can stop since no more lookahead will change that fact. On the
    -// other hand, there might be multiple conflicts that resolve to different
    -// minimums. That means we need more look ahead to decide which of those
    -// alternatives we should predict.
    -//
    -// The basic idea is to split the set of configurations {@code C}, into
    -// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
    -// non-conflicting configurations. Two configurations conflict if they have
    -// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
    -// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
    -// and {@code (s, j, ctx, _)} for {@code i!=j}.
    -//
    -// Reduce these configuration subsets to the set of possible alternatives.
    -// You can compute the alternative subsets in one pass as follows:
    -//
    -// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
    -// {@code C} holding {@code s} and {@code ctx} fixed.
    -//
    -// Or in pseudo-code, for each configuration {@code c} in {@code C}:
    -//
    -// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
    -// alt and not pred
    -//
    -// The values in {@code map} are the set of {@code A_s,ctx} sets.
    -//
    -// If {@code |A_s,ctx|=1} then there is no conflict associated with
    -// {@code s} and {@code ctx}.
    -//
    -// Reduce the subsets to singletons by choosing a minimum of each subset. If
    -// the union of these alternative subsets is a singleton, then no amount of
    -// more lookahead will help us. We will always pick that alternative. If,
    -// however, there is more than one alternative, then we are uncertain which
    -// alternative to predict and must continue looking for resolution. We may
    -// or may not discover an ambiguity in the future, even if there are no
    -// conflicting subsets this round.
    -//
    -// The biggest sin is to terminate early because it means we've made a
    -// decision but were uncertain as to the eventual outcome. We haven't used
    -// enough lookahead. On the other hand, announcing a conflict too late is no
    -// big deal; you will still have the conflict. It's just inefficient. It
    -// might even look until the end of file.
    -//
    -// No special consideration for semantic predicates is required because
    -// predicates are evaluated on-the-fly for full LL prediction, ensuring that
    -// no configuration contains a semantic context during the termination
    -// check.
    -//
    -// CONFLICTING CONFIGS
    -//
    -// Two configurations {@code (s, i, x)} and {@code (s, j, x')} conflict
    -// when {@code i!=j} but {@code x=x'}. Because we merge all
    -// {@code (s, i, _)} configurations together, that means that there are at
    -// most {@code n} configurations associated with state {@code s} for
    -// {@code n} possible alternatives in the decision. The merged stacks
    -// complicate the comparison of configuration contexts {@code x} and
    -// {@code x'}. Sam checks to see if one is a subset of the other by calling
    -// merge and checking to see if the merged result is either {@code x} or
    -// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
    -// is the superset, then {@code i} is the only possible prediction since the
    -// others resolve to {@code min(i)} as well. However, if {@code x} is
    -// associated with {@code j>i} then at least one stack configuration for
    -// {@code j} is not in conflict with alternative {@code i}. The algorithm
    -// should keep going, looking for more lookahead due to the uncertainty.
    -//
    -// For simplicity, I'm doing an equality check between {@code x} and
    -// {@code x'} that lets the algorithm continue to consume lookahead longer
    -// than necessary. The reason I like the equality is of course the
    -// simplicity but also because that is the test you need to detect the
    -// alternatives that are actually in conflict.
    -//
    -// CONTINUE/STOP RULE
    -//
    -// Continue if union of resolved alternative sets from non-conflicting and
    -// conflicting alternative subsets has more than one alternative. We are
    -// uncertain about which alternative to predict.
    -//
    -// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
    -// alternatives are still in the running for the amount of input we've
    -// consumed at this point. The conflicting sets let us strip away
    -// configurations that won't lead to more states because we resolve
    -// conflicts to the configuration with a minimum alternate for the
    -// conflicting set.
    -//
    -// CASES
    -//
    -// • no conflicts and more than 1 alternative in set => continue
    -//
    -// • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
    -// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
    -// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
    -// {@code {1,3}} => continue
    -//
    -// • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
    -// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
    -// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
    -// {@code {1}} => stop and predict 1
    -//
    -// • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
    -// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
    -// {@code {1}} = {@code {1}} => stop and predict 1, can announce
    -// ambiguity {@code {1,2}}
    -//
    -// • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
    -// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
    -// {@code {2}} = {@code {1,2}} => continue
    -//
    -// • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
    -// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
    -// {@code {3}} = {@code {1,3}} => continue
    -//
    -// EXACT AMBIGUITY DETECTION
    -//
    -// If all states Report the same conflicting set of alternatives, then we
    -// know we have the exact ambiguity set.
    -//
    -// |A_i|>1 and A_i = A_j for all i, j.
    -//
    -// In other words, we continue examining lookahead until all {@code A_i}
    -// have more than one alternative and all {@code A_i} are the same. If
    -// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
    -// because the resolved set is {@code {1}}. To determine what the real
    -// ambiguity is, we have to know whether the ambiguity is between one and
    -// two or one and three so we keep going. We can only stop prediction when
    -// we need exact ambiguity detection when the sets look like
    -// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
    

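    Editor's note: the grouping-and-reduction step described above can be shown on plain Go types. The sketch below is an illustration of the comment's pseudo-code, not runtime code; `config` is a stand-in for the (state, alt, ctx) triples.

    ```go
    package main

    import "fmt"

    // config is a stand-in for an ATN configuration (state, alt, context).
    type config struct {
    	state int
    	alt   int
    	ctx   string
    }

    // resolvedAlts groups alts into the A_s,ctx subsets described above and
    // reduces each subset to its minimum alternative.
    func resolvedAlts(configs []config) map[int]bool {
    	type key struct {
    		state int
    		ctx   string
    	}
    	subsets := make(map[key]map[int]bool) // A_s,ctx = {i | (s, i, ctx)}
    	for _, c := range configs {
    		k := key{c.state, c.ctx}
    		if subsets[k] == nil {
    			subsets[k] = make(map[int]bool)
    		}
    		subsets[k][c.alt] = true
    	}
    	viable := make(map[int]bool)
    	for _, alts := range subsets {
    		minAlt := -1
    		for a := range alts {
    			if minAlt == -1 || a < minAlt {
    				minAlt = a
    			}
    		}
    		viable[minAlt] = true // each subset resolves to min(A_s,ctx)
    	}
    	return viable
    }

    func main() {
    	// (s,1,x), (s,2,x), (s',2,y), (s',3,y): reduced sets {1} U {2} = {1,2}
    	cs := []config{{1, 1, "x"}, {1, 2, "x"}, {2, 2, "y"}, {2, 3, "y"}}
    	fmt.Println(len(resolvedAlts(cs)) > 1) // true => keep looking ahead
    }
    ```
    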
-func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { - return PredictionModegetSingleViableAlt(altsets) -} - -// Determines if every alternative subset in {@code altsets} contains more -// than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every {@link BitSet} in {@code altsets} has -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { - return !PredictionModehasNonConflictingAltSet(altsets) -} - -// Determines if any single alternative subset in {@code altsets} contains -// exactly one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} -func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() == 1 { - return true - } - } - return false -} - -// Determines if any single alternative subset in {@code altsets} contains -// more than one alternative. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if {@code altsets} contains a {@link BitSet} with -// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} -func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if alts.length() > 1 { - return true - } - } - return false -} - -// Determines if every alternative subset in {@code altsets} is equivalent. -// -// @param altsets a collection of alternative subsets -// @return {@code true} if every member of {@code altsets} is equal to the -// others, otherwise {@code false} -func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { - var first *BitSet - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - if first == nil { - first = alts - } else if alts != first { - return false - } - } - - return true -} - -// Returns the unique alternative predicted by all alternative subsets in -// {@code altsets}. If no such alternative exists, this method returns -// {@link ATN//INVALID_ALT_NUMBER}. -// -// @param altsets a collection of alternative subsets -func PredictionModegetUniqueAlt(altsets []*BitSet) int { - all := PredictionModeGetAlts(altsets) - if all.length() == 1 { - return all.minValue() - } - - return ATNInvalidAltNumber -} - -// Gets the complete set of represented alternatives for a collection of -// alternative subsets. This method returns the union of each {@link BitSet} -// in {@code altsets}. -// -// @param altsets a collection of alternative subsets -// @return the set of represented alternatives in {@code altsets} -func PredictionModeGetAlts(altsets []*BitSet) *BitSet { - all := NewBitSet() - for _, alts := range altsets { - all.or(alts) - } - return all -} - -// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. -// For each configuration {@code c} in {@code configs}: -// -//
-// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
-// alt and not pred
-// 
-func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { - configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst) - - for _, c := range configs.GetItems() { - - alts, ok := configToAlts.Get(c) - if !ok { - alts = NewBitSet() - configToAlts.Put(c, alts) - } - alts.add(c.GetAlt()) - } - - return configToAlts.Values() -} - -// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each -// configuration {@code c} in {@code configs}: -// -//
-// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
-// 
-func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { - m := NewAltDict() - - for _, c := range configs.GetItems() { - alts := m.Get(c.GetState().String()) - if alts == nil { - alts = NewBitSet() - m.put(c.GetState().String(), alts) - } - alts.(*BitSet).add(c.GetAlt()) - } - return m -} - -func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { - values := PredictionModeGetStateToAltMap(configs).values() - for i := 0; i < len(values); i++ { - if values[i].(*BitSet).length() == 1 { - return true - } - } - return false -} - -func PredictionModegetSingleViableAlt(altsets []*BitSet) int { - result := ATNInvalidAltNumber - - for i := 0; i < len(altsets); i++ { - alts := altsets[i] - minAlt := alts.minValue() - if result == ATNInvalidAltNumber { - result = minAlt - } else if result != minAlt { // more than 1 viable alt - return ATNInvalidAltNumber - } - } - return result -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// This enumeration defines the prediction modes available in ANTLR 4 along with +// utility methods for analyzing configuration sets for conflicts and/or +// ambiguities. + +const ( + // + // The SLL(*) prediction mode. This prediction mode ignores the current + // parser context when making predictions. This is the fastest prediction + // mode, and provides correct results for many grammars. This prediction + // mode is more powerful than the prediction mode provided by ANTLR 3, but + // may result in syntax errors for grammar and input combinations which are + // not SLL. + // + //

+ // When using this prediction mode, the parser will either return a correct + // parse tree (i.e. the same parse tree that would be returned with the + // {@link //LL} prediction mode), or it will Report a syntax error. If a + // syntax error is encountered when using the {@link //SLL} prediction mode, + // it may be due to either an actual syntax error in the input or indicate + // that the particular combination of grammar and input requires the more + // powerful {@link //LL} prediction abilities to complete successfully.

+ // + //

+ // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs.

+ // + PredictionModeSLL = 0 + // + // The LL(*) prediction mode. This prediction mode allows the current parser + // context to be used for resolving SLL conflicts that occur during + // prediction. This is the fastest prediction mode that guarantees correct + // parse results for all combinations of grammars with syntactically correct + // inputs. + // + //

+ // When using this prediction mode, the parser will make correct decisions + // for all syntactically-correct grammar and input combinations. However, in + // cases where the grammar is truly ambiguous this prediction mode might not + // Report a precise answer for exactly which alternatives are + // ambiguous.

+ // + //

+ // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs.

+ // + PredictionModeLL = 1 + // + // The LL(*) prediction mode with exact ambiguity detection. In addition to + // the correctness guarantees provided by the {@link //LL} prediction mode, + // this prediction mode instructs the prediction algorithm to determine the + // complete and exact set of ambiguous alternatives for every ambiguous + // decision encountered while parsing. + // + //

+ // This prediction mode may be used for diagnosing ambiguities during + // grammar development. Due to the performance overhead of calculating sets + // of ambiguous alternatives, this prediction mode should be avoided when + // the exact results are not necessary.

+ // + //

+ // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs.

+ // + PredictionModeLLExactAmbigDetection = 2 +) + +// Computes the SLL prediction termination condition. +// +//

+// This method computes the SLL prediction termination condition for both of +// the following cases.

    +// • The usual SLL+LL fallback upon SLL conflict
    +// • Pure SLL without LL fallback
    +//
    +// COMBINED SLL+LL PARSING
    +//
    +// When LL-fallback is enabled upon SLL conflict, correct predictions are
    +// ensured regardless of how the termination condition is computed by this
    +// method. Due to the substantially higher cost of LL prediction, the
    +// prediction should only fall back to LL when the additional lookahead
    +// cannot lead to a unique SLL prediction.
    +//
    +// Assuming combined SLL+LL parsing, an SLL configuration set with only
    +// conflicting subsets should fall back to full LL, even if the
    +// configuration sets don't resolve to the same alternative (e.g.
    +// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
    +// configuration, SLL could continue with the hopes that more lookahead will
    +// resolve via one of those non-conflicting configurations.
    +//
    +// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
    +// stops when it sees only conflicting configuration subsets. In contrast,
    +// full LL keeps going when there is uncertainty.
    +//
    +// HEURISTIC
    +//
    +// As a heuristic, we stop prediction when we see any conflicting subset
    +// unless we see a state that only has one alternative associated with it.
    +// The single-alt-state thing lets prediction continue upon rules like
    +// (otherwise, it would admit defeat too soon):
    +//
    +// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }
    +//
    +// When the ATN simulation reaches the state before {@code ';'}, it has a
    +// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
    +// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
    +// processing this node because alternative two has another way to continue,
    +// via {@code [6|2|[]]}.
    +//
    +// It also lets us continue for this rule:
    +//
    +// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
    +//
    +// After Matching input A, we reach the stop state for rule A, state 1.
    +// State 8 is the state right before B. Clearly alternatives 1 and 2
    +// conflict and no amount of further lookahead will separate the two.
    +// However, alternative 3 will be able to continue and so we do not stop
    +// working on this state. In the previous example, we're concerned with
    +// states associated with the conflicting alternatives. Here alt 3 is not
    +// associated with the conflicting configs, but since we can continue
    +// looking for input reasonably, don't declare the state done.
    +//
    +// PURE SLL PARSING
    +//
    +// To handle pure SLL parsing, all we have to do is make sure that we
    +// combine stack contexts for configurations that differ only by semantic
    +// predicate. From there, we can do the usual SLL termination heuristic.
    +//
    +// PREDICATES IN SLL+LL PARSING
    +//
    +// SLL decisions don't evaluate predicates until after they reach DFA stop
    +// states because they need to create the DFA cache that works in all
    +// semantic situations. In contrast, full LL evaluates predicates collected
    +// during start state computation so it can ignore predicates thereafter.
    +// This means that SLL termination detection can totally ignore semantic
    +// predicates.
    +//
    +// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
    +// semantic predicate contexts so we might see two configurations like the
    +// following.
    +//
    +// {@code (s, 1, x, {}), (s, 1, x', {p})}
    +//
    +// Before testing these configurations against others, we have to merge
    +// {@code x} and {@code x'} (without modifying the existing configurations).
    +// For example, we test {@code (x+x')==x''} when looking for conflicts in
    +// the following configurations.
    +//
    +// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
    +//
    +// If the configuration set has predicates (as indicated by
    +// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
    +// the configurations to strip out all of the predicates so that a standard
    +// {@link ATNConfigSet} will merge everything ignoring predicates.
    

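    Editor's note: stripped of the runtime's BitSet and config-set types, the stop condition the next function implements reduces to two scans. A plain-Go illustration follows; the map-based set shapes are assumptions, not the runtime API.

    ```go
    package main

    import "fmt"

    // sllStop mirrors the shape of the SLL termination test: stop only when some
    // alt subset conflicts and no ATN state is associated with a single alt.
    func sllStop(altSubsets []map[int]bool, stateToAlts map[int]map[int]bool) bool {
    	hasConflict := false
    	for _, alts := range altSubsets {
    		if len(alts) > 1 {
    			hasConflict = true // a (state, ctx) subset still has competing alts
    		}
    	}
    	singleAltState := false
    	for _, alts := range stateToAlts {
    		if len(alts) == 1 {
    			singleAltState = true // some state can only continue one way
    		}
    	}
    	return hasConflict && !singleAltState
    }

    func main() {
    	// The [12|1|[], 6|2|[], 12|2|[]] example above: state 12 conflicts on
    	// {1,2}, but state 6 holds the single alt {2}, so prediction continues.
    	subsets := []map[int]bool{{1: true, 2: true}, {2: true}}
    	states := map[int]map[int]bool{12: {1: true, 2: true}, 6: {2: true}}
    	fmt.Println(sllStop(subsets, states)) // false => keep consuming lookahead
    }
    ```
    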
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to Match additional input so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.HasSemanticContext() { + // dup configs, tossing out semantic predicates + dup := NewBaseATNConfigSet(false) + for _, c := range configs.GetItems() { + + // NewBaseATNConfig({semanticContext:}, c) + c = NewBaseATNConfig2(c, SemanticContextNone) + dup.Add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar preds + } + // pure SLL or combined SLL+LL mode parsing + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) +} + +// Checks if any configuration in {@code configs} is in a +// {@link RuleStopState}. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// @param configs the configuration set to test +// @return {@code true} if any configuration in {@code configs} is in a +// {@link RuleStopState}, otherwise {@code false} +func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { + for _, c := range configs.GetItems() { + if _, ok := c.GetState().(*RuleStopState); ok { + return true + } + } + return false +} + +// Checks if all configurations in {@code configs} are in a +// {@link RuleStopState}. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// @param configs the configuration set to test +// @return {@code true} if all configurations in {@code configs} are in a +// {@link RuleStopState}, otherwise {@code false} +func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { + + for _, c := range configs.GetItems() { + if _, ok := c.GetState().(*RuleStopState); !ok { + return false + } + } + return true +} + +// Full LL prediction termination. +// +//

    +// Can we stop looking ahead during ATN simulation or is there some
    +// uncertainty as to which alternative we will ultimately pick, after
    +// consuming more input? Even if there are partial conflicts, we might know
    +// that everything is going to resolve to the same minimum alternative. That
    +// means we can stop since no more lookahead will change that fact. On the
    +// other hand, there might be multiple conflicts that resolve to different
    +// minimums. That means we need more look ahead to decide which of those
    +// alternatives we should predict.
    +//
    +// The basic idea is to split the set of configurations {@code C}, into
    +// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
    +// non-conflicting configurations. Two configurations conflict if they have
    +// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
    +// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
    +// and {@code (s, j, ctx, _)} for {@code i!=j}.
    +//
    +// Reduce these configuration subsets to the set of possible alternatives.
    +// You can compute the alternative subsets in one pass as follows:
    +//
    +// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
    +// {@code C} holding {@code s} and {@code ctx} fixed.
    +//
    +// Or in pseudo-code, for each configuration {@code c} in {@code C}:
    +//
    +// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
    +// alt and not pred
    +//
    +// The values in {@code map} are the set of {@code A_s,ctx} sets.
    +//
    +// If {@code |A_s,ctx|=1} then there is no conflict associated with
    +// {@code s} and {@code ctx}.
    +//
    +// Reduce the subsets to singletons by choosing a minimum of each subset. If
    +// the union of these alternative subsets is a singleton, then no amount of
    +// more lookahead will help us. We will always pick that alternative. If,
    +// however, there is more than one alternative, then we are uncertain which
    +// alternative to predict and must continue looking for resolution. We may
    +// or may not discover an ambiguity in the future, even if there are no
    +// conflicting subsets this round.
    +//
    +// The biggest sin is to terminate early because it means we've made a
    +// decision but were uncertain as to the eventual outcome. We haven't used
    +// enough lookahead. On the other hand, announcing a conflict too late is no
    +// big deal; you will still have the conflict. It's just inefficient. It
    +// might even look until the end of file.
    +//
    +// No special consideration for semantic predicates is required because
    +// predicates are evaluated on-the-fly for full LL prediction, ensuring that
    +// no configuration contains a semantic context during the termination
    +// check.
    +//
    +// CONFLICTING CONFIGS
    +//
    +// Two configurations {@code (s, i, x)} and {@code (s, j, x')} conflict
    +// when {@code i!=j} but {@code x=x'}. Because we merge all
    +// {@code (s, i, _)} configurations together, that means that there are at
    +// most {@code n} configurations associated with state {@code s} for
    +// {@code n} possible alternatives in the decision. The merged stacks
    +// complicate the comparison of configuration contexts {@code x} and
    +// {@code x'}. Sam checks to see if one is a subset of the other by calling
    +// merge and checking to see if the merged result is either {@code x} or
    +// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
    +// is the superset, then {@code i} is the only possible prediction since the
    +// others resolve to {@code min(i)} as well. However, if {@code x} is
    +// associated with {@code j>i} then at least one stack configuration for
    +// {@code j} is not in conflict with alternative {@code i}. The algorithm
    +// should keep going, looking for more lookahead due to the uncertainty.
    +//
    +// For simplicity, I'm doing an equality check between {@code x} and
    +// {@code x'} that lets the algorithm continue to consume lookahead longer
    +// than necessary. The reason I like the equality is of course the
    +// simplicity but also because that is the test you need to detect the
    +// alternatives that are actually in conflict.
    +//
    +// CONTINUE/STOP RULE
    +//
    +// Continue if union of resolved alternative sets from non-conflicting and
    +// conflicting alternative subsets has more than one alternative. We are
    +// uncertain about which alternative to predict.
    +//
    +// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
    +// alternatives are still in the running for the amount of input we've
    +// consumed at this point. The conflicting sets let us strip away
    +// configurations that won't lead to more states because we resolve
    +// conflicts to the configuration with a minimum alternate for the
    +// conflicting set.
    +//
    +// CASES
    +//
    +// • no conflicts and more than 1 alternative in set => continue
    +//
    +// • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
    +// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
    +// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
    +// {@code {1,3}} => continue
    +//
    +// • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
    +// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
    +// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
    +// {@code {1}} => stop and predict 1
    +//
    +// • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
    +// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
    +// {@code {1}} = {@code {1}} => stop and predict 1, can announce
    +// ambiguity {@code {1,2}}
    +//
    +// • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
    +// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
    +// {@code {2}} = {@code {1,2}} => continue
    +//
    +// • {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
    +// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
    +// {@code {3}} = {@code {1,3}} => continue
    +//
    +// EXACT AMBIGUITY DETECTION
    +//
    +// If all states Report the same conflicting set of alternatives, then we
    +// know we have the exact ambiguity set.
    +//
    +// |A_i|>1 and A_i = A_j for all i, j.
    +//
    +// In other words, we continue examining lookahead until all {@code A_i}
    +// have more than one alternative and all {@code A_i} are the same. If
    +// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
    +// because the resolved set is {@code {1}}. To determine what the real
    +// ambiguity is, we have to know whether the ambiguity is between one and
    +// two or one and three so we keep going. We can only stop prediction when
    +// we need exact ambiguity detection when the sets look like
    +// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
    

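    Editor's note: the exact-ambiguity stop test above combines "every subset conflicts" with "every subset is equal". A plain-Go illustration of that predicate follows; the map-based set representation is an assumption, not the runtime API.

    ```go
    package main

    import "fmt"

    // canStopExact reports whether exact ambiguity detection may stop: every
    // subset must conflict (|A_i| > 1) and all subsets must be identical.
    func canStopExact(altSubsets []map[int]bool) bool {
    	if len(altSubsets) == 0 {
    		return false
    	}
    	for _, alts := range altSubsets {
    		if len(alts) < 2 {
    			return false // a non-conflicting subset: not done yet
    		}
    	}
    	first := altSubsets[0]
    	for _, alts := range altSubsets[1:] {
    		if len(alts) != len(first) {
    			return false
    		}
    		for a := range alts {
    			if !first[a] {
    				return false // subsets differ; the exact ambiguity set is unknown
    			}
    		}
    	}
    	return true
    }

    func main() {
    	fmt.Println(canStopExact([]map[int]bool{{1: true, 2: true}, {1: true, 3: true}})) // false
    	fmt.Println(canStopExact([]map[int]bool{{1: true, 2: true}, {1: true, 2: true}})) // true
    }
    ```
    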
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { + return PredictionModegetSingleViableAlt(altsets) +} + +// Determines if every alternative subset in {@code altsets} contains more +// than one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if every {@link BitSet} in {@code altsets} has +// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} +func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { + return !PredictionModehasNonConflictingAltSet(altsets) +} + +// Determines if any single alternative subset in {@code altsets} contains +// exactly one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if {@code altsets} contains a {@link BitSet} with +// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} +func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() == 1 { + return true + } + } + return false +} + +// Determines if any single alternative subset in {@code altsets} contains +// more than one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if {@code altsets} contains a {@link BitSet} with +// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} +func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() > 1 { + return true + } + } + return false +} + +// Determines if every alternative subset in {@code altsets} is equivalent. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if every member of {@code altsets} is equal to the +// others, otherwise {@code false} +func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { + var first *BitSet + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if first == nil { + first = alts + } else if alts != first { + return false + } + } + + return true +} + +// Returns the unique alternative predicted by all alternative subsets in +// {@code altsets}. If no such alternative exists, this method returns +// {@link ATN//INVALID_ALT_NUMBER}. +// +// @param altsets a collection of alternative subsets +func PredictionModegetUniqueAlt(altsets []*BitSet) int { + all := PredictionModeGetAlts(altsets) + if all.length() == 1 { + return all.minValue() + } + + return ATNInvalidAltNumber +} + +// Gets the complete set of represented alternatives for a collection of +// alternative subsets. This method returns the union of each {@link BitSet} +// in {@code altsets}. +// +// @param altsets a collection of alternative subsets +// @return the set of represented alternatives in {@code altsets} +func PredictionModeGetAlts(altsets []*BitSet) *BitSet { + all := NewBitSet() + for _, alts := range altsets { + all.or(alts) + } + return all +} + +// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. +// For each configuration {@code c} in {@code configs}: +// +//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+// 
+func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { + configToAlts := NewJMap[ATNConfig, *BitSet, *ATNAltConfigComparator[ATNConfig]](atnAltCfgEqInst) + + for _, c := range configs.GetItems() { + + alts, ok := configToAlts.Get(c) + if !ok { + alts = NewBitSet() + configToAlts.Put(c, alts) + } + alts.add(c.GetAlt()) + } + + return configToAlts.Values() +} + +// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. For each +// configuration {@code c} in {@code configs}: +// +//
+// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
+// 
+func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { + m := NewAltDict() + + for _, c := range configs.GetItems() { + alts := m.Get(c.GetState().String()) + if alts == nil { + alts = NewBitSet() + m.put(c.GetState().String(), alts) + } + alts.(*BitSet).add(c.GetAlt()) + } + return m +} + +func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { + values := PredictionModeGetStateToAltMap(configs).values() + for i := 0; i < len(values); i++ { + if values[i].(*BitSet).length() == 1 { + return true + } + } + return false +} + +func PredictionModegetSingleViableAlt(altsets []*BitSet) int { + result := ATNInvalidAltNumber + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + minAlt := alts.minValue() + if result == ATNInvalidAltNumber { + result = minAlt + } else if result != minAlt { // more than 1 viable alt + return ATNInvalidAltNumber + } + } + return result +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go index bfe542d091..e2915bcf14 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/recognizer.go @@ -1,216 +1,216 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strings" - - "strconv" -) - -type Recognizer interface { - GetLiteralNames() []string - GetSymbolicNames() []string - GetRuleNames() []string - - Sempred(RuleContext, int, int) bool - Precpred(RuleContext, int) bool - - GetState() int - SetState(int) - Action(RuleContext, int, int) - AddErrorListener(ErrorListener) - RemoveErrorListeners() - GetATN() *ATN - GetErrorListenerDispatch() ErrorListener -} - -type BaseRecognizer struct { - listeners []ErrorListener - state int - - RuleNames []string - LiteralNames []string - SymbolicNames []string - GrammarFileName string -} - -func NewBaseRecognizer() *BaseRecognizer { - rec := new(BaseRecognizer) - rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} - rec.state = -1 - return rec -} - -var tokenTypeMapCache = make(map[string]int) -var ruleIndexMapCache = make(map[string]int) - -func (b *BaseRecognizer) checkVersion(toolVersion string) { - runtimeVersion := "4.12.0" - if runtimeVersion != toolVersion { - fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) - } -} - -func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { - panic("action not implemented on Recognizer!") -} - -func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { - b.listeners = append(b.listeners, listener) -} - -func (b *BaseRecognizer) RemoveErrorListeners() { - b.listeners = make([]ErrorListener, 0) -} - -func (b *BaseRecognizer) GetRuleNames() []string { - return b.RuleNames -} - -func (b *BaseRecognizer) GetTokenNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetSymbolicNames() []string { - return b.SymbolicNames -} - -func (b *BaseRecognizer) GetLiteralNames() []string { - return b.LiteralNames -} - -func (b *BaseRecognizer) GetState() int { - return b.state -} - -func (b *BaseRecognizer) SetState(v int) { - b.state = v -} - -//func (b *Recognizer) GetTokenTypeMap() { -// var tokenNames = b.GetTokenNames() -// if (tokenNames==nil) { -// panic("The current recognizer does not 
provide a list of token names.") -// } -// var result = tokenTypeMapCache[tokenNames] -// if(result==nil) { -// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) -// result.EOF = TokenEOF -// tokenTypeMapCache[tokenNames] = result -// } -// return result -//} - -// Get a map from rule names to rule indexes. -// -//

    -// Used for XPath and tree pattern compilation.
    

-func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { - - panic("Method not defined!") - // var ruleNames = b.GetRuleNames() - // if (ruleNames==nil) { - // panic("The current recognizer does not provide a list of rule names.") - // } - // - // var result = ruleIndexMapCache[ruleNames] - // if(result==nil) { - // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) - // ruleIndexMapCache[ruleNames] = result - // } - // return result -} - -func (b *BaseRecognizer) GetTokenType(tokenName string) int { - panic("Method not defined!") - // var ttype = b.GetTokenTypeMap()[tokenName] - // if (ttype !=nil) { - // return ttype - // } else { - // return TokenInvalidType - // } -} - -//func (b *Recognizer) GetTokenTypeMap() map[string]int { -// Vocabulary vocabulary = getVocabulary() -// -// Synchronized (tokenTypeMapCache) { -// Map result = tokenTypeMapCache.Get(vocabulary) -// if (result == null) { -// result = new HashMap() -// for (int i = 0; i < GetATN().maxTokenType; i++) { -// String literalName = vocabulary.getLiteralName(i) -// if (literalName != null) { -// result.put(literalName, i) -// } -// -// String symbolicName = vocabulary.GetSymbolicName(i) -// if (symbolicName != null) { -// result.put(symbolicName, i) -// } -// } -// -// result.put("EOF", Token.EOF) -// result = Collections.unmodifiableMap(result) -// tokenTypeMapCache.put(vocabulary, result) -// } -// -// return result -// } -//} - -// What is the error header, normally line/character position information?// -func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { - line := e.GetOffendingToken().GetLine() - column := e.GetOffendingToken().GetColumn() - return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) -} - -// How should a token be displayed in an error message? The default -// -// is to display just the text, but during development you might -// want to have a lot of information spit out. Override in that case -// to use t.String() (which, for CommonToken, dumps everything about -// the token). This is better than forcing you to override a method in -// your token objects because you don't have to go modify your lexer -// so that it creates a NewJava type. -// -// @deprecated This method is not called by the ANTLR 4 Runtime. Specific -// implementations of {@link ANTLRErrorStrategy} may provide a similar -// feature when necessary. For example, see -// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. -func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { - if t == nil { - return "" - } - s := t.GetText() - if s == "" { - if t.GetTokenType() == TokenEOF { - s = "" - } else { - s = "<" + strconv.Itoa(t.GetTokenType()) + ">" - } - } - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - - return "'" + s + "'" -} - -func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { - return NewProxyErrorListener(b.listeners) -} - -// subclass needs to override these if there are sempreds or actions -// that the ATN interp needs to execute -func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool { - return true -} - -func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool { - return true -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "strings" + + "strconv" +) + +type Recognizer interface { + GetLiteralNames() []string + GetSymbolicNames() []string + GetRuleNames() []string + + Sempred(RuleContext, int, int) bool + Precpred(RuleContext, int) bool + + GetState() int + SetState(int) + Action(RuleContext, int, int) + AddErrorListener(ErrorListener) + RemoveErrorListeners() + GetATN() *ATN + GetErrorListenerDispatch() ErrorListener +} + +type BaseRecognizer struct { + listeners []ErrorListener + state int + + RuleNames []string + LiteralNames []string + SymbolicNames []string + GrammarFileName string +} + +func NewBaseRecognizer() *BaseRecognizer { + rec := new(BaseRecognizer) + rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} + rec.state = -1 + return rec +} + +var tokenTypeMapCache = make(map[string]int) +var ruleIndexMapCache = make(map[string]int) + +func (b *BaseRecognizer) checkVersion(toolVersion string) { + runtimeVersion := "4.12.0" + if runtimeVersion != toolVersion { + fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) + } +} + +func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { + panic("action not implemented on Recognizer!") +} + +func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { + b.listeners = append(b.listeners, listener) +} + +func (b *BaseRecognizer) RemoveErrorListeners() { + b.listeners = make([]ErrorListener, 0) +} + +func (b *BaseRecognizer) GetRuleNames() []string { + return b.RuleNames +} + +func (b *BaseRecognizer) GetTokenNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetSymbolicNames() []string { + return b.SymbolicNames +} + +func (b *BaseRecognizer) GetLiteralNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetState() int { + return b.state +} + +func (b *BaseRecognizer) SetState(v int) { + b.state = v +} + +//func (b *Recognizer) GetTokenTypeMap() { +// var tokenNames = b.GetTokenNames() +// if (tokenNames==nil) { +// panic("The current recognizer does not provide a list of token names.") +// } +// var result = tokenTypeMapCache[tokenNames] +// if(result==nil) { +// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) +// result.EOF = TokenEOF +// tokenTypeMapCache[tokenNames] = result +// } +// return result +//} + +// Get a map from rule names to rule indexes. +// +//

    +// Used for XPath and tree pattern compilation.
    

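    Editor's note: the GetRuleIndexMap method that follows is a panic stub whose intent survives only in the commented-out JavaScript. A minimal sketch of what the body could do, assuming caching is simply skipped (the existing ruleIndexMapCache, a map[string]int, is not shaped to hold a per-recognizer map):

    ```go
    // A hypothetical body for the panic stub below, mirroring the commented-out
    // JavaScript; caching via ruleIndexMapCache is deliberately omitted because
    // its current map[string]int shape cannot hold a per-recognizer map.
    func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
    	ruleNames := b.GetRuleNames()
    	if ruleNames == nil {
    		panic("The current recognizer does not provide a list of rule names.")
    	}
    	result := make(map[string]int, len(ruleNames))
    	for i, name := range ruleNames {
    		result[name] = i // rule name -> rule index, for XPath and tree patterns
    	}
    	return result
    }
    ```
    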
+func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { + + panic("Method not defined!") + // var ruleNames = b.GetRuleNames() + // if (ruleNames==nil) { + // panic("The current recognizer does not provide a list of rule names.") + // } + // + // var result = ruleIndexMapCache[ruleNames] + // if(result==nil) { + // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) + // ruleIndexMapCache[ruleNames] = result + // } + // return result +} + +func (b *BaseRecognizer) GetTokenType(tokenName string) int { + panic("Method not defined!") + // var ttype = b.GetTokenTypeMap()[tokenName] + // if (ttype !=nil) { + // return ttype + // } else { + // return TokenInvalidType + // } +} + +//func (b *Recognizer) GetTokenTypeMap() map[string]int { +// Vocabulary vocabulary = getVocabulary() +// +// Synchronized (tokenTypeMapCache) { +// Map result = tokenTypeMapCache.Get(vocabulary) +// if (result == null) { +// result = new HashMap() +// for (int i = 0; i < GetATN().maxTokenType; i++) { +// String literalName = vocabulary.getLiteralName(i) +// if (literalName != null) { +// result.put(literalName, i) +// } +// +// String symbolicName = vocabulary.GetSymbolicName(i) +// if (symbolicName != null) { +// result.put(symbolicName, i) +// } +// } +// +// result.put("EOF", Token.EOF) +// result = Collections.unmodifiableMap(result) +// tokenTypeMapCache.put(vocabulary, result) +// } +// +// return result +// } +//} + +// What is the error header, normally line/character position information?// +func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { + line := e.GetOffendingToken().GetLine() + column := e.GetOffendingToken().GetColumn() + return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) +} + +// How should a token be displayed in an error message? The default +// +// is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. +// +// @deprecated This method is not called by the ANTLR 4 Runtime. Specific +// implementations of {@link ANTLRErrorStrategy} may provide a similar +// feature when necessary. For example, see +// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. 
+func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { + if t == nil { + return "" + } + s := t.GetText() + if s == "" { + if t.GetTokenType() == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.GetTokenType()) + ">" + } + } + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + + return "'" + s + "'" +} + +func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { + return NewProxyErrorListener(b.listeners) +} + +// subclass needs to override these if there are sempreds or actions +// that the ATN interp needs to execute +func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool { + return true +} + +func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool { + return true +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go index 210699ba23..6519d820cb 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/rule_context.go @@ -1,114 +1,114 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// A rule context is a record of a single rule invocation. It knows -// which context invoked it, if any. If there is no parent context, then -// naturally the invoking state is not valid. The parent link -// provides a chain upwards from the current rule invocation to the root -// of the invocation tree, forming a stack. We actually carry no -// information about the rule associated with b context (except -// when parsing). We keep only the state number of the invoking state from -// the ATN submachine that invoked b. Contrast b with the s -// pointer inside ParserRuleContext that tracks the current state -// being "executed" for the current rule. -// -// The parent contexts are useful for computing lookahead sets and -// getting error information. -// -// These objects are used during parsing and prediction. -// For the special case of parsers, we use the subclass -// ParserRuleContext. -// -// @see ParserRuleContext -// - -type RuleContext interface { - RuleNode - - GetInvokingState() int - SetInvokingState(int) - - GetRuleIndex() int - IsEmpty() bool - - GetAltNumber() int - SetAltNumber(altNumber int) - - String([]string, RuleContext) string -} - -type BaseRuleContext struct { - parentCtx RuleContext - invokingState int - RuleIndex int -} - -func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext { - - rn := new(BaseRuleContext) - - // What context invoked b rule? - rn.parentCtx = parent - - // What state invoked the rule associated with b context? - // The "return address" is the followState of invokingState - // If parent is nil, b should be -1. 
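The `rule_context.go` hunk beginning above describes the parent chain as a stack of rule invocations, with an invoking state of -1 at the root. A standalone sketch of that constructor logic and chain walk (the `ctx` type is a stand-in, not the vendored `BaseRuleContext`):

```go
package main

import "fmt"

// ctx stands in for BaseRuleContext: a parent pointer plus the invoking
// ATN state, with -1 marking the root (what IsEmpty reports).
type ctx struct {
	parent        *ctx
	invokingState int
}

// newCtx follows the constructor in the hunk: a nil parent forces the
// invoking state to -1 regardless of the value passed in.
func newCtx(parent *ctx, invokingState int) *ctx {
	if parent == nil {
		return &ctx{invokingState: -1}
	}
	return &ctx{parent: parent, invokingState: invokingState}
}

// depth walks the parent chain, the "stack" the comment describes.
func depth(c *ctx) int {
	n := 0
	for ; c != nil; c = c.parent {
		n++
	}
	return n
}

func main() {
	root := newCtx(nil, 99)               // 99 is ignored: the root has no invoker
	child := newCtx(root, 42)             // invoked from ATN state 42
	fmt.Println(root.invokingState == -1) // true: the root context is "empty"
	fmt.Println(depth(child))             // 2
}
```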
- if parent == nil { - rn.invokingState = -1 - } else { - rn.invokingState = invokingState - } - - return rn -} - -func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { - return b -} - -func (b *BaseRuleContext) SetParent(v Tree) { - if v == nil { - b.parentCtx = nil - } else { - b.parentCtx = v.(RuleContext) - } -} - -func (b *BaseRuleContext) GetInvokingState() int { - return b.invokingState -} - -func (b *BaseRuleContext) SetInvokingState(t int) { - b.invokingState = t -} - -func (b *BaseRuleContext) GetRuleIndex() int { - return b.RuleIndex -} - -func (b *BaseRuleContext) GetAltNumber() int { - return ATNInvalidAltNumber -} - -func (b *BaseRuleContext) SetAltNumber(altNumber int) {} - -// A context is empty if there is no invoking state meaning nobody call -// current context. -func (b *BaseRuleContext) IsEmpty() bool { - return b.invokingState == -1 -} - -// Return the combined text of all child nodes. This method only considers -// tokens which have been added to the parse tree. -//

-// Since tokens on hidden channels (e.g. whitespace or comments) are not -// added to the parse trees, they will not appear in the output of b -// method. -// - -func (b *BaseRuleContext) GetParent() Tree { - return b.parentCtx -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// A rule context is a record of a single rule invocation. It knows +// which context invoked it, if any. If there is no parent context, then +// naturally the invoking state is not valid. The parent link +// provides a chain upwards from the current rule invocation to the root +// of the invocation tree, forming a stack. We actually carry no +// information about the rule associated with b context (except +// when parsing). We keep only the state number of the invoking state from +// the ATN submachine that invoked b. Contrast b with the s +// pointer inside ParserRuleContext that tracks the current state +// being "executed" for the current rule. +// +// The parent contexts are useful for computing lookahead sets and +// getting error information. +// +// These objects are used during parsing and prediction. +// For the special case of parsers, we use the subclass +// ParserRuleContext. +// +// @see ParserRuleContext +// + +type RuleContext interface { + RuleNode + + GetInvokingState() int + SetInvokingState(int) + + GetRuleIndex() int + IsEmpty() bool + + GetAltNumber() int + SetAltNumber(altNumber int) + + String([]string, RuleContext) string +} + +type BaseRuleContext struct { + parentCtx RuleContext + invokingState int + RuleIndex int +} + +func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext { + + rn := new(BaseRuleContext) + + // What context invoked b rule? + rn.parentCtx = parent + + // What state invoked the rule associated with b context? + // The "return address" is the followState of invokingState + // If parent is nil, b should be -1. + if parent == nil { + rn.invokingState = -1 + } else { + rn.invokingState = invokingState + } + + return rn +} + +func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { + return b +} + +func (b *BaseRuleContext) SetParent(v Tree) { + if v == nil { + b.parentCtx = nil + } else { + b.parentCtx = v.(RuleContext) + } +} + +func (b *BaseRuleContext) GetInvokingState() int { + return b.invokingState +} + +func (b *BaseRuleContext) SetInvokingState(t int) { + b.invokingState = t +} + +func (b *BaseRuleContext) GetRuleIndex() int { + return b.RuleIndex +} + +func (b *BaseRuleContext) GetAltNumber() int { + return ATNInvalidAltNumber +} + +func (b *BaseRuleContext) SetAltNumber(altNumber int) {} + +// A context is empty if there is no invoking state meaning nobody call +// current context. +func (b *BaseRuleContext) IsEmpty() bool { + return b.invokingState == -1 +} + +// Return the combined text of all child nodes. This method only considers +// tokens which have been added to the parse tree. +//

+// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of b +// method. +// + +func (b *BaseRuleContext) GetParent() Tree { + return b.parentCtx +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go index a702e99def..277677f448 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/semantic_context.go @@ -1,469 +1,469 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" -) - -// A tree structure used to record the semantic context in which -// an ATN configuration is valid. It's either a single predicate, -// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. -// -//

I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of -// {@link SemanticContext} within the scope of this outer class.

-// - -type SemanticContext interface { - Equals(other Collectable[SemanticContext]) bool - Hash() int - - evaluate(parser Recognizer, outerContext RuleContext) bool - evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext - - String() string -} - -func SemanticContextandContext(a, b SemanticContext) SemanticContext { - if a == nil || a == SemanticContextNone { - return b - } - if b == nil || b == SemanticContextNone { - return a - } - result := NewAND(a, b) - if len(result.opnds) == 1 { - return result.opnds[0] - } - - return result -} - -func SemanticContextorContext(a, b SemanticContext) SemanticContext { - if a == nil { - return b - } - if b == nil { - return a - } - if a == SemanticContextNone || b == SemanticContextNone { - return SemanticContextNone - } - result := NewOR(a, b) - if len(result.opnds) == 1 { - return result.opnds[0] - } - - return result -} - -type Predicate struct { - ruleIndex int - predIndex int - isCtxDependent bool -} - -func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { - p := new(Predicate) - - p.ruleIndex = ruleIndex - p.predIndex = predIndex - p.isCtxDependent = isCtxDependent // e.g., $i ref in pred - return p -} - -//The default {@link SemanticContext}, which is semantically equivalent to -//a predicate of the form {@code {true}?}. - -var SemanticContextNone = NewPredicate(-1, -1, false) - -func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - return p -} - -func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { - - var localctx RuleContext - - if p.isCtxDependent { - localctx = outerContext - } - - return parser.Sempred(localctx, p.ruleIndex, p.predIndex) -} - -func (p *Predicate) Equals(other Collectable[SemanticContext]) bool { - if p == other { - return true - } else if _, ok := other.(*Predicate); !ok { - return false - } else { - return p.ruleIndex == other.(*Predicate).ruleIndex && - p.predIndex == other.(*Predicate).predIndex && - p.isCtxDependent == other.(*Predicate).isCtxDependent - } -} - -func (p *Predicate) Hash() int { - h := murmurInit(0) - h = murmurUpdate(h, p.ruleIndex) - h = murmurUpdate(h, p.predIndex) - if p.isCtxDependent { - h = murmurUpdate(h, 1) - } else { - h = murmurUpdate(h, 0) - } - return murmurFinish(h, 3) -} - -func (p *Predicate) String() string { - return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" 
-} - -type PrecedencePredicate struct { - precedence int -} - -func NewPrecedencePredicate(precedence int) *PrecedencePredicate { - - p := new(PrecedencePredicate) - p.precedence = precedence - - return p -} - -func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { - return parser.Precpred(outerContext, p.precedence) -} - -func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - if parser.Precpred(outerContext, p.precedence) { - return SemanticContextNone - } - - return nil -} - -func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { - return p.precedence - other.precedence -} - -func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool { - - var op *PrecedencePredicate - var ok bool - if op, ok = other.(*PrecedencePredicate); !ok { - return false - } - - if p == op { - return true - } - - return p.precedence == other.(*PrecedencePredicate).precedence -} - -func (p *PrecedencePredicate) Hash() int { - h := uint32(1) - h = 31*h + uint32(p.precedence) - return int(h) -} - -func (p *PrecedencePredicate) String() string { - return "{" + strconv.Itoa(p.precedence) + ">=prec}?" -} - -func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate { - result := make([]*PrecedencePredicate, 0) - - set.Each(func(v SemanticContext) bool { - if c2, ok := v.(*PrecedencePredicate); ok { - result = append(result, c2) - } - return true - }) - - return result -} - -// A semantic context which is true whenever none of the contained contexts -// is false.` - -type AND struct { - opnds []SemanticContext -} - -func NewAND(a, b SemanticContext) *AND { - - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) - if aa, ok := a.(*AND); ok { - for _, o := range aa.opnds { - operands.Put(o) - } - } else { - operands.Put(a) - } - - if ba, ok := b.(*AND); ok { - for _, o := range ba.opnds { - operands.Put(o) - } - } else { - operands.Put(b) - } - precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) - if len(precedencePredicates) > 0 { - // interested in the transition with the lowest precedence - var reduced *PrecedencePredicate - - for _, p := range precedencePredicates { - if reduced == nil || p.precedence < reduced.precedence { - reduced = p - } - } - - operands.Put(reduced) - } - - vs := operands.Values() - opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } - - and := new(AND) - and.opnds = opnds - - return and -} - -func (a *AND) Equals(other Collectable[SemanticContext]) bool { - if a == other { - return true - } - if _, ok := other.(*AND); !ok { - return false - } else { - for i, v := range other.(*AND).opnds { - if !a.opnds[i].Equals(v) { - return false - } - } - return true - } -} - -// {@inheritDoc} -// -//

-// The evaluation of predicates by a context is short-circuiting, but -// unordered.

-func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { - for i := 0; i < len(a.opnds); i++ { - if !a.opnds[i].evaluate(parser, outerContext) { - return false - } - } - return true -} - -func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - differs := false - operands := make([]SemanticContext, 0) - - for i := 0; i < len(a.opnds); i++ { - context := a.opnds[i] - evaluated := context.evalPrecedence(parser, outerContext) - differs = differs || (evaluated != context) - if evaluated == nil { - // The AND context is false if any element is false - return nil - } else if evaluated != SemanticContextNone { - // Reduce the result by Skipping true elements - operands = append(operands, evaluated) - } - } - if !differs { - return a - } - - if len(operands) == 0 { - // all elements were true, so the AND context is true - return SemanticContextNone - } - - var result SemanticContext - - for _, o := range operands { - if result == nil { - result = o - } else { - result = SemanticContextandContext(result, o) - } - } - - return result -} - -func (a *AND) Hash() int { - h := murmurInit(37) // Init with a value different from OR - for _, op := range a.opnds { - h = murmurUpdate(h, op.Hash()) - } - return murmurFinish(h, len(a.opnds)) -} - -func (a *OR) Hash() int { - h := murmurInit(41) // Init with a value different from AND - for _, op := range a.opnds { - h = murmurUpdate(h, op.Hash()) - } - return murmurFinish(h, len(a.opnds)) -} - -func (a *AND) String() string { - s := "" - - for _, o := range a.opnds { - s += "&& " + fmt.Sprint(o) - } - - if len(s) > 3 { - return s[0:3] - } - - return s -} - -// -// A semantic context which is true whenever at least one of the contained -// contexts is true. -// - -type OR struct { - opnds []SemanticContext -} - -func NewOR(a, b SemanticContext) *OR { - - operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) - if aa, ok := a.(*OR); ok { - for _, o := range aa.opnds { - operands.Put(o) - } - } else { - operands.Put(a) - } - - if ba, ok := b.(*OR); ok { - for _, o := range ba.opnds { - operands.Put(o) - } - } else { - operands.Put(b) - } - precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) - if len(precedencePredicates) > 0 { - // interested in the transition with the lowest precedence - var reduced *PrecedencePredicate - - for _, p := range precedencePredicates { - if reduced == nil || p.precedence > reduced.precedence { - reduced = p - } - } - - operands.Put(reduced) - } - - vs := operands.Values() - - opnds := make([]SemanticContext, len(vs)) - for i, v := range vs { - opnds[i] = v.(SemanticContext) - } - - o := new(OR) - o.opnds = opnds - - return o -} - -func (o *OR) Equals(other Collectable[SemanticContext]) bool { - if o == other { - return true - } else if _, ok := other.(*OR); !ok { - return false - } else { - for i, v := range other.(*OR).opnds { - if !o.opnds[i].Equals(v) { - return false - } - } - return true - } -} - -//

-// The evaluation of predicates by o context is short-circuiting, but -// unordered.

-func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { - for i := 0; i < len(o.opnds); i++ { - if o.opnds[i].evaluate(parser, outerContext) { - return true - } - } - return false -} - -func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { - differs := false - operands := make([]SemanticContext, 0) - for i := 0; i < len(o.opnds); i++ { - context := o.opnds[i] - evaluated := context.evalPrecedence(parser, outerContext) - differs = differs || (evaluated != context) - if evaluated == SemanticContextNone { - // The OR context is true if any element is true - return SemanticContextNone - } else if evaluated != nil { - // Reduce the result by Skipping false elements - operands = append(operands, evaluated) - } - } - if !differs { - return o - } - if len(operands) == 0 { - // all elements were false, so the OR context is false - return nil - } - var result SemanticContext - - for _, o := range operands { - if result == nil { - result = o - } else { - result = SemanticContextorContext(result, o) - } - } - - return result -} - -func (o *OR) String() string { - s := "" - - for _, o := range o.opnds { - s += "|| " + fmt.Sprint(o) - } - - if len(s) > 3 { - return s[0:3] - } - - return s -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// A tree structure used to record the semantic context in which +// an ATN configuration is valid. It's either a single predicate, +// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. +// +//

I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of +// {@link SemanticContext} within the scope of this outer class.

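`SemanticContextandContext` and `SemanticContextorContext` in the hunk below reduce combinations involving `SemanticContextNone` (the always-true predicate). A minimal sketch of those identities using a string stand-in rather than the vendored types:

```go
package main

import "fmt"

// sem stands in for SemanticContext; the empty string plays nil and
// none plays SemanticContextNone, the always-true predicate.
type sem string

const none sem = "{true}?"

// andCtx mirrors SemanticContextandContext: None is the identity of
// conjunction, so it simply drops out.
func andCtx(a, b sem) sem {
	if a == "" || a == none {
		return b
	}
	if b == "" || b == none {
		return a
	}
	return "(" + a + "&&" + b + ")"
}

// orCtx mirrors SemanticContextorContext: None absorbs a disjunction,
// because true||p is always true.
func orCtx(a, b sem) sem {
	if a == "" {
		return b
	}
	if b == "" {
		return a
	}
	if a == none || b == none {
		return none
	}
	return "(" + a + "||" + b + ")"
}

func main() {
	p, q := sem("{p}?"), sem("{q}?")
	fmt.Println(andCtx(none, p)) // {p}?
	fmt.Println(orCtx(none, p))  // {true}?
	fmt.Println(andCtx(p, q))    // ({p}?&&{q}?)
}
```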
+// + +type SemanticContext interface { + Equals(other Collectable[SemanticContext]) bool + Hash() int + + evaluate(parser Recognizer, outerContext RuleContext) bool + evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext + + String() string +} + +func SemanticContextandContext(a, b SemanticContext) SemanticContext { + if a == nil || a == SemanticContextNone { + return b + } + if b == nil || b == SemanticContextNone { + return a + } + result := NewAND(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +func SemanticContextorContext(a, b SemanticContext) SemanticContext { + if a == nil { + return b + } + if b == nil { + return a + } + if a == SemanticContextNone || b == SemanticContextNone { + return SemanticContextNone + } + result := NewOR(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +type Predicate struct { + ruleIndex int + predIndex int + isCtxDependent bool +} + +func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { + p := new(Predicate) + + p.ruleIndex = ruleIndex + p.predIndex = predIndex + p.isCtxDependent = isCtxDependent // e.g., $i ref in pred + return p +} + +//The default {@link SemanticContext}, which is semantically equivalent to +//a predicate of the form {@code {true}?}. + +var SemanticContextNone = NewPredicate(-1, -1, false) + +func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + return p +} + +func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + + var localctx RuleContext + + if p.isCtxDependent { + localctx = outerContext + } + + return parser.Sempred(localctx, p.ruleIndex, p.predIndex) +} + +func (p *Predicate) Equals(other Collectable[SemanticContext]) bool { + if p == other { + return true + } else if _, ok := other.(*Predicate); !ok { + return false + } else { + return p.ruleIndex == other.(*Predicate).ruleIndex && + p.predIndex == other.(*Predicate).predIndex && + p.isCtxDependent == other.(*Predicate).isCtxDependent + } +} + +func (p *Predicate) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, p.ruleIndex) + h = murmurUpdate(h, p.predIndex) + if p.isCtxDependent { + h = murmurUpdate(h, 1) + } else { + h = murmurUpdate(h, 0) + } + return murmurFinish(h, 3) +} + +func (p *Predicate) String() string { + return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" 
+} + +type PrecedencePredicate struct { + precedence int +} + +func NewPrecedencePredicate(precedence int) *PrecedencePredicate { + + p := new(PrecedencePredicate) + p.precedence = precedence + + return p +} + +func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + return parser.Precpred(outerContext, p.precedence) +} + +func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + if parser.Precpred(outerContext, p.precedence) { + return SemanticContextNone + } + + return nil +} + +func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { + return p.precedence - other.precedence +} + +func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool { + + var op *PrecedencePredicate + var ok bool + if op, ok = other.(*PrecedencePredicate); !ok { + return false + } + + if p == op { + return true + } + + return p.precedence == other.(*PrecedencePredicate).precedence +} + +func (p *PrecedencePredicate) Hash() int { + h := uint32(1) + h = 31*h + uint32(p.precedence) + return int(h) +} + +func (p *PrecedencePredicate) String() string { + return "{" + strconv.Itoa(p.precedence) + ">=prec}?" +} + +func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate { + result := make([]*PrecedencePredicate, 0) + + set.Each(func(v SemanticContext) bool { + if c2, ok := v.(*PrecedencePredicate); ok { + result = append(result, c2) + } + return true + }) + + return result +} + +// A semantic context which is true whenever none of the contained contexts +// is false.` + +type AND struct { + opnds []SemanticContext +} + +func NewAND(a, b SemanticContext) *AND { + + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + if aa, ok := a.(*AND); ok { + for _, o := range aa.opnds { + operands.Put(o) + } + } else { + operands.Put(a) + } + + if ba, ok := b.(*AND); ok { + for _, o := range ba.opnds { + operands.Put(o) + } + } else { + operands.Put(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence < reduced.precedence { + reduced = p + } + } + + operands.Put(reduced) + } + + vs := operands.Values() + opnds := make([]SemanticContext, len(vs)) + for i, v := range vs { + opnds[i] = v.(SemanticContext) + } + + and := new(AND) + and.opnds = opnds + + return and +} + +func (a *AND) Equals(other Collectable[SemanticContext]) bool { + if a == other { + return true + } + if _, ok := other.(*AND); !ok { + return false + } else { + for i, v := range other.(*AND).opnds { + if !a.opnds[i].Equals(v) { + return false + } + } + return true + } +} + +// {@inheritDoc} +// +//

+// The evaluation of predicates by a context is short-circuiting, but +// unordered.

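The comment above notes that predicate evaluation is short-circuiting but unordered; `AND.evaluate` stops at the first false operand and `OR.evaluate` (further down) at the first true one. A standalone sketch demonstrating the short-circuit, with plain closures standing in for the `opnds` predicates:

```go
package main

import "fmt"

// evalAll mirrors AND.evaluate: stop at the first false operand.
func evalAll(preds []func() bool) bool {
	for _, p := range preds {
		if !p() { // short-circuit: later operands never run
			return false
		}
	}
	return true
}

// evalAny mirrors OR.evaluate: stop at the first true operand.
func evalAny(preds []func() bool) bool {
	for _, p := range preds {
		if p() {
			return true
		}
	}
	return false
}

func main() {
	calls := 0
	pred := func(v bool) func() bool {
		return func() bool { calls++; return v }
	}
	fmt.Println(evalAll([]func() bool{pred(true), pred(false), pred(true)})) // false
	fmt.Println(calls) // 2: the third predicate was skipped
}
```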
+func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(a.opnds); i++ { + if !a.opnds[i].evaluate(parser, outerContext) { + return false + } + } + return true +} + +func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + + for i := 0; i < len(a.opnds); i++ { + context := a.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == nil { + // The AND context is false if any element is false + return nil + } else if evaluated != SemanticContextNone { + // Reduce the result by Skipping true elements + operands = append(operands, evaluated) + } + } + if !differs { + return a + } + + if len(operands) == 0 { + // all elements were true, so the AND context is true + return SemanticContextNone + } + + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextandContext(result, o) + } + } + + return result +} + +func (a *AND) Hash() int { + h := murmurInit(37) // Init with a value different from OR + for _, op := range a.opnds { + h = murmurUpdate(h, op.Hash()) + } + return murmurFinish(h, len(a.opnds)) +} + +func (a *OR) Hash() int { + h := murmurInit(41) // Init with a value different from AND + for _, op := range a.opnds { + h = murmurUpdate(h, op.Hash()) + } + return murmurFinish(h, len(a.opnds)) +} + +func (a *AND) String() string { + s := "" + + for _, o := range a.opnds { + s += "&& " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} + +// +// A semantic context which is true whenever at least one of the contained +// contexts is true. +// + +type OR struct { + opnds []SemanticContext +} + +func NewOR(a, b SemanticContext) *OR { + + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst) + if aa, ok := a.(*OR); ok { + for _, o := range aa.opnds { + operands.Put(o) + } + } else { + operands.Put(a) + } + + if ba, ok := b.(*OR); ok { + for _, o := range ba.opnds { + operands.Put(o) + } + } else { + operands.Put(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence > reduced.precedence { + reduced = p + } + } + + operands.Put(reduced) + } + + vs := operands.Values() + + opnds := make([]SemanticContext, len(vs)) + for i, v := range vs { + opnds[i] = v.(SemanticContext) + } + + o := new(OR) + o.opnds = opnds + + return o +} + +func (o *OR) Equals(other Collectable[SemanticContext]) bool { + if o == other { + return true + } else if _, ok := other.(*OR); !ok { + return false + } else { + for i, v := range other.(*OR).opnds { + if !o.opnds[i].Equals(v) { + return false + } + } + return true + } +} + +//

+// The evaluation of predicates by o context is short-circuiting, but +// unordered.

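`NewAND` and `NewOR` above also reduce precedence predicates: the conjunction keeps only the operand with the lowest precedence bound, the disjunction the highest. A sketch of just that reduction loop, with `precPred` standing in for `PrecedencePredicate`:

```go
package main

import "fmt"

// precPred stands in for PrecedencePredicate ("current precedence >= n").
type precPred struct{ precedence int }

// reduceAnd mirrors the loop in NewAND: among the precedence predicates,
// only the one with the lowest bound is kept.
func reduceAnd(preds []*precPred) *precPred {
	var reduced *precPred
	for _, p := range preds {
		if reduced == nil || p.precedence < reduced.precedence {
			reduced = p
		}
	}
	return reduced
}

// reduceOr mirrors NewOR, which keeps the highest bound instead.
func reduceOr(preds []*precPred) *precPred {
	var reduced *precPred
	for _, p := range preds {
		if reduced == nil || p.precedence > reduced.precedence {
			reduced = p
		}
	}
	return reduced
}

func main() {
	ps := []*precPred{{3}, {1}, {2}}
	fmt.Println(reduceAnd(ps).precedence) // 1
	fmt.Println(reduceOr(ps).precedence)  // 3
}
```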
+func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(o.opnds); i++ { + if o.opnds[i].evaluate(parser, outerContext) { + return true + } + } + return false +} + +func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + for i := 0; i < len(o.opnds); i++ { + context := o.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == SemanticContextNone { + // The OR context is true if any element is true + return SemanticContextNone + } else if evaluated != nil { + // Reduce the result by Skipping false elements + operands = append(operands, evaluated) + } + } + if !differs { + return o + } + if len(operands) == 0 { + // all elements were false, so the OR context is false + return nil + } + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextorContext(result, o) + } + } + + return result +} + +func (o *OR) String() string { + s := "" + + for _, o := range o.opnds { + s += "|| " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go index f73b06bc6a..c642ac819d 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token.go @@ -1,209 +1,209 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "strconv" - "strings" -) - -type TokenSourceCharStreamPair struct { - tokenSource TokenSource - charStream CharStream -} - -// A token has properties: text, type, line, character position in the line -// (so we can ignore tabs), token channel, index, and source from which -// we obtained this token. - -type Token interface { - GetSource() *TokenSourceCharStreamPair - GetTokenType() int - GetChannel() int - GetStart() int - GetStop() int - GetLine() int - GetColumn() int - - GetText() string - SetText(s string) - - GetTokenIndex() int - SetTokenIndex(v int) - - GetTokenSource() TokenSource - GetInputStream() CharStream -} - -type BaseToken struct { - source *TokenSourceCharStreamPair - tokenType int // token type of the token - channel int // The parser ignores everything not on DEFAULT_CHANNEL - start int // optional return -1 if not implemented. - stop int // optional return -1 if not implemented. - tokenIndex int // from 0..n-1 of the token object in the input stream - line int // line=1..n of the 1st character - column int // beginning of the line at which it occurs, 0..n-1 - text string // text of the token. - readOnly bool -} - -const ( - TokenInvalidType = 0 - - // During lookahead operations, this "token" signifies we hit rule end ATN state - // and did not follow it despite needing to. - TokenEpsilon = -2 - - TokenMinUserTokenType = 1 - - TokenEOF = -1 - - // All tokens go to the parser (unless Skip() is called in that rule) - // on a particular "channel". The parser tunes to a particular channel - // so that whitespace etc... can go to the parser on a "hidden" channel. - - TokenDefaultChannel = 0 - - // Anything on different channel than DEFAULT_CHANNEL is not parsed - // by parser. 
- - TokenHiddenChannel = 1 -) - -func (b *BaseToken) GetChannel() int { - return b.channel -} - -func (b *BaseToken) GetStart() int { - return b.start -} - -func (b *BaseToken) GetStop() int { - return b.stop -} - -func (b *BaseToken) GetLine() int { - return b.line -} - -func (b *BaseToken) GetColumn() int { - return b.column -} - -func (b *BaseToken) GetTokenType() int { - return b.tokenType -} - -func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { - return b.source -} - -func (b *BaseToken) GetTokenIndex() int { - return b.tokenIndex -} - -func (b *BaseToken) SetTokenIndex(v int) { - b.tokenIndex = v -} - -func (b *BaseToken) GetTokenSource() TokenSource { - return b.source.tokenSource -} - -func (b *BaseToken) GetInputStream() CharStream { - return b.source.charStream -} - -type CommonToken struct { - *BaseToken -} - -func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { - - t := new(CommonToken) - - t.BaseToken = new(BaseToken) - - t.source = source - t.tokenType = tokenType - t.channel = channel - t.start = start - t.stop = stop - t.tokenIndex = -1 - if t.source.tokenSource != nil { - t.line = source.tokenSource.GetLine() - t.column = source.tokenSource.GetCharPositionInLine() - } else { - t.column = -1 - } - return t -} - -// An empty {@link Pair} which is used as the default value of -// {@link //source} for tokens that do not have a source. - -//CommonToken.EMPTY_SOURCE = [ nil, nil ] - -// Constructs a New{@link CommonToken} as a copy of another {@link Token}. -// -//

-// If {@code oldToken} is also a {@link CommonToken} instance, the newly -// constructed token will share a reference to the {@link //text} field and -// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will -// be assigned the result of calling {@link //GetText}, and {@link //source} -// will be constructed from the result of {@link Token//GetTokenSource} and -// {@link Token//GetInputStream}.

-// -// @param oldToken The token to copy. -func (c *CommonToken) clone() *CommonToken { - t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) - t.tokenIndex = c.GetTokenIndex() - t.line = c.GetLine() - t.column = c.GetColumn() - t.text = c.GetText() - return t -} - -func (c *CommonToken) GetText() string { - if c.text != "" { - return c.text - } - input := c.GetInputStream() - if input == nil { - return "" - } - n := input.Size() - if c.start < n && c.stop < n { - return input.GetTextFromInterval(NewInterval(c.start, c.stop)) - } - return "" -} - -func (c *CommonToken) SetText(text string) { - c.text = text -} - -func (c *CommonToken) String() string { - txt := c.GetText() - if txt != "" { - txt = strings.Replace(txt, "\n", "\\n", -1) - txt = strings.Replace(txt, "\r", "\\r", -1) - txt = strings.Replace(txt, "\t", "\\t", -1) - } else { - txt = "" - } - - var ch string - if c.channel > 0 { - ch = ",channel=" + strconv.Itoa(c.channel) - } else { - ch = "" - } - - return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + - txt + "',<" + strconv.Itoa(c.tokenType) + ">" + - ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type TokenSourceCharStreamPair struct { + tokenSource TokenSource + charStream CharStream +} + +// A token has properties: text, type, line, character position in the line +// (so we can ignore tabs), token channel, index, and source from which +// we obtained this token. + +type Token interface { + GetSource() *TokenSourceCharStreamPair + GetTokenType() int + GetChannel() int + GetStart() int + GetStop() int + GetLine() int + GetColumn() int + + GetText() string + SetText(s string) + + GetTokenIndex() int + SetTokenIndex(v int) + + GetTokenSource() TokenSource + GetInputStream() CharStream +} + +type BaseToken struct { + source *TokenSourceCharStreamPair + tokenType int // token type of the token + channel int // The parser ignores everything not on DEFAULT_CHANNEL + start int // optional return -1 if not implemented. + stop int // optional return -1 if not implemented. + tokenIndex int // from 0..n-1 of the token object in the input stream + line int // line=1..n of the 1st character + column int // beginning of the line at which it occurs, 0..n-1 + text string // text of the token. + readOnly bool +} + +const ( + TokenInvalidType = 0 + + // During lookahead operations, this "token" signifies we hit rule end ATN state + // and did not follow it despite needing to. + TokenEpsilon = -2 + + TokenMinUserTokenType = 1 + + TokenEOF = -1 + + // All tokens go to the parser (unless Skip() is called in that rule) + // on a particular "channel". The parser tunes to a particular channel + // so that whitespace etc... can go to the parser on a "hidden" channel. + + TokenDefaultChannel = 0 + + // Anything on different channel than DEFAULT_CHANNEL is not parsed + // by parser. 
+ + TokenHiddenChannel = 1 +) + +func (b *BaseToken) GetChannel() int { + return b.channel +} + +func (b *BaseToken) GetStart() int { + return b.start +} + +func (b *BaseToken) GetStop() int { + return b.stop +} + +func (b *BaseToken) GetLine() int { + return b.line +} + +func (b *BaseToken) GetColumn() int { + return b.column +} + +func (b *BaseToken) GetTokenType() int { + return b.tokenType +} + +func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { + return b.source +} + +func (b *BaseToken) GetTokenIndex() int { + return b.tokenIndex +} + +func (b *BaseToken) SetTokenIndex(v int) { + b.tokenIndex = v +} + +func (b *BaseToken) GetTokenSource() TokenSource { + return b.source.tokenSource +} + +func (b *BaseToken) GetInputStream() CharStream { + return b.source.charStream +} + +type CommonToken struct { + *BaseToken +} + +func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { + + t := new(CommonToken) + + t.BaseToken = new(BaseToken) + + t.source = source + t.tokenType = tokenType + t.channel = channel + t.start = start + t.stop = stop + t.tokenIndex = -1 + if t.source.tokenSource != nil { + t.line = source.tokenSource.GetLine() + t.column = source.tokenSource.GetCharPositionInLine() + } else { + t.column = -1 + } + return t +} + +// An empty {@link Pair} which is used as the default value of +// {@link //source} for tokens that do not have a source. + +//CommonToken.EMPTY_SOURCE = [ nil, nil ] + +// Constructs a New{@link CommonToken} as a copy of another {@link Token}. +// +//

+// If {@code oldToken} is also a {@link CommonToken} instance, the newly +// constructed token will share a reference to the {@link //text} field and +// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will +// be assigned the result of calling {@link //GetText}, and {@link //source} +// will be constructed from the result of {@link Token//GetTokenSource} and +// {@link Token//GetInputStream}.

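The `clone` method below copies the positional fields and then carries over index, line, column, and text; since Go strings are immutable, sharing the text between original and copy is safe, which is the sharing the comment above describes. A standalone sketch of that pattern (the `tok` type is illustrative, not the vendored `CommonToken`):

```go
package main

import "fmt"

// tok carries the fields the vendored clone copies over.
type tok struct {
	tokenType, channel, start, stop int
	tokenIndex, line, column        int
	text                            string
}

// clone follows the vendored pattern: build a fresh token from the
// positional fields, then carry over index, line, column, and text.
func (t *tok) clone() *tok {
	c := &tok{tokenType: t.tokenType, channel: t.channel, start: t.start, stop: t.stop}
	c.tokenIndex, c.line, c.column = t.tokenIndex, t.line, t.column
	c.text = t.text
	return c
}

func main() {
	orig := &tok{tokenType: 5, stop: 2, tokenIndex: 7, line: 1, text: "foo"}
	cp := orig.clone()
	cp.text = "bar"
	fmt.Println(orig.text, cp.text) // foo bar: the original is untouched
}
```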
+// +// @param oldToken The token to copy. +func (c *CommonToken) clone() *CommonToken { + t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) + t.tokenIndex = c.GetTokenIndex() + t.line = c.GetLine() + t.column = c.GetColumn() + t.text = c.GetText() + return t +} + +func (c *CommonToken) GetText() string { + if c.text != "" { + return c.text + } + input := c.GetInputStream() + if input == nil { + return "" + } + n := input.Size() + if c.start < n && c.stop < n { + return input.GetTextFromInterval(NewInterval(c.start, c.stop)) + } + return "" +} + +func (c *CommonToken) SetText(text string) { + c.text = text +} + +func (c *CommonToken) String() string { + txt := c.GetText() + if txt != "" { + txt = strings.Replace(txt, "\n", "\\n", -1) + txt = strings.Replace(txt, "\r", "\\r", -1) + txt = strings.Replace(txt, "\t", "\\t", -1) + } else { + txt = "" + } + + var ch string + if c.channel > 0 { + ch = ",channel=" + strconv.Itoa(c.channel) + } else { + ch = "" + } + + return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + + txt + "',<" + strconv.Itoa(c.tokenType) + ">" + + ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go index a3f36eaa67..8027e40f8f 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_source.go @@ -1,17 +1,17 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type TokenSource interface { - NextToken() Token - Skip() - More() - GetLine() int - GetCharPositionInLine() int - GetInputStream() CharStream - GetSourceName() string - setTokenFactory(factory TokenFactory) - GetTokenFactory() TokenFactory -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type TokenSource interface { + NextToken() Token + Skip() + More() + GetLine() int + GetCharPositionInLine() int + GetInputStream() CharStream + GetSourceName() string + setTokenFactory(factory TokenFactory) + GetTokenFactory() TokenFactory +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go index 1527d43f60..ace54684ee 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/token_stream.go @@ -1,20 +1,20 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -type TokenStream interface { - IntStream - - LT(k int) Token - - Get(index int) Token - GetTokenSource() TokenSource - SetTokenSource(TokenSource) - - GetAllText() string - GetTextFromInterval(*Interval) string - GetTextFromRuleContext(RuleContext) string - GetTextFromTokens(Token, Token) string -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type TokenStream interface { + IntStream + + LT(k int) Token + + Get(index int) Token + GetTokenSource() TokenSource + SetTokenSource(TokenSource) + + GetAllText() string + GetTextFromInterval(*Interval) string + GetTextFromRuleContext(RuleContext) string + GetTextFromTokens(Token, Token) string +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go index b3e38af344..2946e8b27f 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tokenstream_rewriter.go @@ -1,659 +1,659 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "bytes" - "fmt" -) - -// -// Useful for rewriting out a buffered input token stream after doing some -// augmentation or other manipulations on it. - -//

-// You can insert stuff, replace, and delete chunks. Note that the operations -// are done lazily--only if you convert the buffer to a {@link String} with -// {@link TokenStream#getText()}. This is very efficient because you are not -// moving data around all the time. As the buffer of tokens is converted to -// strings, the {@link #getText()} method(s) scan the input token stream and -// check to see if there is an operation at the current index. If so, the -// operation is done and then normal {@link String} rendering continues on the -// buffer. This is like having multiple Turing machine instruction streams -// (programs) operating on a single input tape. :)

-//

- -// This rewriter makes no modifications to the token stream. It does not ask the -// stream to fill itself up nor does it advance the input cursor. The token -// stream {@link TokenStream#index()} will return the same value before and -// after any {@link #getText()} call.

- -//

-// The rewriter only works on tokens that you have in the buffer and ignores the -// current input cursor. If you are buffering tokens on-demand, calling -// {@link #getText()} halfway through the input will only do rewrites for those -// tokens in the first half of the file.

- -//

-// Since the operations are done lazily at {@link #getText}-time, operations do -// not screw up the token index values. That is, an insert operation at token -// index {@code i} does not change the index values for tokens -// {@code i}+1..n-1.

- -//

-// Because operations never actually alter the buffer, you may always get the -// original token stream back without undoing anything. Since the instructions -// are queued up, you can easily simulate transactions and roll back any changes -// if there is an error just by removing instructions. For example,

- -//
-// CharStream input = new ANTLRFileStream("input");
-// TLexer lex = new TLexer(input);
-// CommonTokenStream tokens = new CommonTokenStream(lex);
-// T parser = new T(tokens);
-// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
-// parser.startRule();
-// 
- -//

-// Then in the rules, you can execute (assuming rewriter is visible):

- -//
-// Token t,u;
-// ...
-// rewriter.insertAfter(t, "text to put after t");}
-// rewriter.insertAfter(u, "text after u");}
-// System.out.println(rewriter.getText());
-// 
- -//

-// You can also have multiple "instruction streams" and get multiple rewrites -// from a single pass over the input. Just name the instruction streams and use -// that name again when printing the buffer. This could be useful for generating -// a C file and also its header file--all from the same buffer:

- -//
-// rewriter.insertAfter("pass1", t, "text to put after t");}
-// rewriter.insertAfter("pass2", u, "text after u");}
-// System.out.println(rewriter.getText("pass1"));
-// System.out.println(rewriter.getText("pass2"));
-// 
- -//

-// If you don't use named rewrite streams, a "default" stream is used as the -// first example shows.

- -const ( - Default_Program_Name = "default" - Program_Init_Size = 100 - Min_Token_Index = 0 -) - -// Define the rewrite operation hierarchy - -type RewriteOperation interface { - // Execute the rewrite operation by possibly adding to the buffer. - // Return the index of the next token to operate on. - Execute(buffer *bytes.Buffer) int - String() string - GetInstructionIndex() int - GetIndex() int - GetText() string - GetOpName() string - GetTokens() TokenStream - SetInstructionIndex(val int) - SetIndex(int) - SetText(string) - SetOpName(string) - SetTokens(TokenStream) -} - -type BaseRewriteOperation struct { - //Current index of rewrites list - instruction_index int - //Token buffer index - index int - //Substitution text - text string - //Actual operation name - op_name string - //Pointer to token steam - tokens TokenStream -} - -func (op *BaseRewriteOperation) GetInstructionIndex() int { - return op.instruction_index -} - -func (op *BaseRewriteOperation) GetIndex() int { - return op.index -} - -func (op *BaseRewriteOperation) GetText() string { - return op.text -} - -func (op *BaseRewriteOperation) GetOpName() string { - return op.op_name -} - -func (op *BaseRewriteOperation) GetTokens() TokenStream { - return op.tokens -} - -func (op *BaseRewriteOperation) SetInstructionIndex(val int) { - op.instruction_index = val -} - -func (op *BaseRewriteOperation) SetIndex(val int) { - op.index = val -} - -func (op *BaseRewriteOperation) SetText(val string) { - op.text = val -} - -func (op *BaseRewriteOperation) SetOpName(val string) { - op.op_name = val -} - -func (op *BaseRewriteOperation) SetTokens(val TokenStream) { - op.tokens = val -} - -func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int { - return op.index -} - -func (op *BaseRewriteOperation) String() string { - return fmt.Sprintf("<%s@%d:\"%s\">", - op.op_name, - op.tokens.Get(op.GetIndex()), - op.text, - ) - -} - -type InsertBeforeOp struct { - BaseRewriteOperation -} - -func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { - return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index, - text: text, - op_name: "InsertBeforeOp", - tokens: stream, - }} -} - -func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF { - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index + 1 -} - -func (op *InsertBeforeOp) String() string { - return op.BaseRewriteOperation.String() -} - -// Distinguish between insert after/before to do the "insert afters" -// first and then the "insert befores" at same index. Implementation -// of "insert after" is "insert before index+1". - -type InsertAfterOp struct { - BaseRewriteOperation -} - -func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { - return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{ - index: index + 1, - text: text, - tokens: stream, - }} -} - -func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { - buffer.WriteString(op.text) - if op.tokens.Get(op.index).GetTokenType() != TokenEOF { - buffer.WriteString(op.tokens.Get(op.index).GetText()) - } - return op.index + 1 -} - -func (op *InsertAfterOp) String() string { - return op.BaseRewriteOperation.String() -} - -// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp -// instructions. 
-type ReplaceOp struct { - BaseRewriteOperation - LastIndex int -} - -func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp { - return &ReplaceOp{ - BaseRewriteOperation: BaseRewriteOperation{ - index: from, - text: text, - op_name: "ReplaceOp", - tokens: stream, - }, - LastIndex: to, - } -} - -func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int { - if op.text != "" { - buffer.WriteString(op.text) - } - return op.LastIndex + 1 -} - -func (op *ReplaceOp) String() string { - if op.text == "" { - return fmt.Sprintf("", - op.tokens.Get(op.index), op.tokens.Get(op.LastIndex)) - } - return fmt.Sprintf("", - op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) -} - -type TokenStreamRewriter struct { - //Our source stream - tokens TokenStream - // You may have multiple, named streams of rewrite operations. - // I'm calling these things "programs." - // Maps String (name) → rewrite (List) - programs map[string][]RewriteOperation - last_rewrite_token_indexes map[string]int -} - -func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter { - return &TokenStreamRewriter{ - tokens: tokens, - programs: map[string][]RewriteOperation{ - Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size), - }, - last_rewrite_token_indexes: map[string]int{}, - } -} - -func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream { - return tsr.tokens -} - -// Rollback the instruction stream for a program so that -// the indicated instruction (via instructionIndex) is no -// longer in the stream. UNTESTED! -func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) { - is, ok := tsr.programs[program_name] - if ok { - tsr.programs[program_name] = is[Min_Token_Index:instruction_index] - } -} - -func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) { - tsr.Rollback(Default_Program_Name, instruction_index) -} - -// Reset the program so that no instructions exist -func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) { - tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included -} - -func (tsr *TokenStreamRewriter) DeleteProgramDefault() { - tsr.DeleteProgram(Default_Program_Name) -} - -func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) { - // to insert after, just insert before next index (even if past end) - var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) { - tsr.InsertAfter(Default_Program_Name, index, text) -} - -func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) { - tsr.InsertAfter(program_name, token.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) { - var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) { - tsr.InsertBefore(Default_Program_Name, index, text) -} - -func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) { - tsr.InsertBefore(program_name, token.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter) Replace(program_name 
string, from, to int, text string) { - if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { - panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", - from, to, tsr.tokens.Size())) - } - var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) - rewrites := tsr.GetProgram(program_name) - op.SetInstructionIndex(len(rewrites)) - tsr.AddToProgram(program_name, op) -} - -func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { - tsr.Replace(Default_Program_Name, from, to, text) -} - -func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { - tsr.ReplaceDefault(index, index, text) -} - -func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) { - tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) -} - -func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { - tsr.ReplaceToken(Default_Program_Name, from, to, text) -} - -func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { - tsr.ReplaceTokenDefault(index, index, text) -} - -func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) { - tsr.Replace(program_name, from, to, "") -} - -func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { - tsr.Delete(Default_Program_Name, from, to) -} - -func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { - tsr.DeleteDefault(index, index) -} - -func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) { - tsr.ReplaceToken(program_name, from, to, "") -} - -func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { - tsr.DeleteToken(Default_Program_Name, from, to) -} - -func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int { - i, ok := tsr.last_rewrite_token_indexes[program_name] - if !ok { - return -1 - } - return i -} - -func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { - return tsr.GetLastRewriteTokenIndex(Default_Program_Name) -} - -func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) { - tsr.last_rewrite_token_indexes[program_name] = i -} - -func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { - is := make([]RewriteOperation, 0, Program_Init_Size) - tsr.programs[name] = is - return is -} - -func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) { - is := tsr.GetProgram(name) - is = append(is, op) - tsr.programs[name] = is -} - -func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { - is, ok := tsr.programs[name] - if !ok { - is = tsr.InitializeProgram(name) - } - return is -} - -// Return the text from the original tokens altered per the -// instructions given to this rewriter. -func (tsr *TokenStreamRewriter) GetTextDefault() string { - return tsr.GetText( - Default_Program_Name, - NewInterval(0, tsr.tokens.Size()-1)) -} - -// Return the text from the original tokens altered per the -// instructions given to this rewriter. 
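The long comment at the top of this file explains the rewriter's central idea: the token buffer is never mutated; edits are queued as instructions and only applied when the text is rendered, as `GetText` below does. A minimal standalone sketch of that lazy strategy (the `miniRewriter` type and its methods are illustrative, not the runtime API):

```go
package main

import (
	"fmt"
	"strings"
)

// insertOp stands in for a queued rewrite instruction.
type insertOp struct {
	index int
	text  string
}

type miniRewriter struct {
	tokens []string   // stand-in for the buffered token stream
	ops    []insertOp // the queued "program" of instructions
}

func (r *miniRewriter) InsertBefore(i int, text string) {
	r.ops = append(r.ops, insertOp{i, text})
}

// Text replays the original tokens, consulting the instruction list at
// each index: the same lazy rendering GetText performs.
func (r *miniRewriter) Text() string {
	var b strings.Builder
	for i, tok := range r.tokens {
		for _, op := range r.ops {
			if op.index == i {
				b.WriteString(op.text)
			}
		}
		b.WriteString(tok)
	}
	return b.String()
}

func main() {
	r := &miniRewriter{tokens: []string{"x", "=", "1", ";"}}
	r.InsertBefore(0, "int ")
	fmt.Println(r.Text()) // int x=1;
	fmt.Println(r.tokens) // [x = 1 ;]: the buffer itself is untouched
}
```

Because the instruction list is the only thing that changes, rolling back is just truncating the list, which is exactly what `Rollback` above does.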
-func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string { - rewrites := tsr.programs[program_name] - start := interval.Start - stop := interval.Stop - // ensure start/end are in range - stop = min(stop, tsr.tokens.Size()-1) - start = max(start, 0) - if rewrites == nil || len(rewrites) == 0 { - return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute - } - buf := bytes.Buffer{} - // First, optimize instruction stream - indexToOp := reduceToSingleOperationPerIndex(rewrites) - // Walk buffer, executing instructions and emitting tokens - for i := start; i <= stop && i < tsr.tokens.Size(); { - op := indexToOp[i] - delete(indexToOp, i) // remove so any left have index size-1 - t := tsr.tokens.Get(i) - if op == nil { - // no operation at that index, just dump token - if t.GetTokenType() != TokenEOF { - buf.WriteString(t.GetText()) - } - i++ // move to next token - } else { - i = op.Execute(&buf) // execute operation and skip - } - } - // include stuff after end if it's last index in buffer - // So, if they did an insertAfter(lastValidIndex, "foo"), include - // foo if end==lastValidIndex. - if stop == tsr.tokens.Size()-1 { - // Scan any remaining operations after last token - // should be included (they will be inserts). - for _, op := range indexToOp { - if op.GetIndex() >= tsr.tokens.Size()-1 { - buf.WriteString(op.GetText()) - } - } - } - return buf.String() -} - -// We need to combine operations and report invalid operations (like -// overlapping replaces that are not completed nested). Inserts to -// same index need to be combined etc... Here are the cases: -// -// I.i.u I.j.v leave alone, nonoverlapping -// I.i.u I.i.v combine: Iivu -// -// R.i-j.u R.x-y.v | i-j in x-y delete first R -// R.i-j.u R.i-j.v delete first R -// R.i-j.u R.x-y.v | x-y in i-j ERROR -// R.i-j.u R.x-y.v | boundaries overlap ERROR -// -// Delete special case of replace (text==null): -// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) -// -// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before -// we're not deleting i) -// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping -// R.x-y.v I.i.u | i in x-y ERROR -// R.x-y.v I.x.u R.x-y.uv (combine, delete I) -// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping -// -// I.i.u = insert u before op @ index i -// R.x-y.u = replace x-y indexed tokens with u -// -// First we need to examine replaces. For any replace op: -// -// 1. wipe out any insertions before op within that range. -// 2. Drop any replace op before that is contained completely within -// that range. -// 3. Throw exception upon boundary overlap with any previous replace. -// -// Then we can deal with inserts: -// -// 1. for any inserts to same index, combine even if not adjacent. -// 2. for any prior replace with same left boundary, combine this -// insert with replace and delete this replace. -// 3. throw exception if index in same range as previous replace -// -// Don't actually delete; make op null in list. Easier to walk list. -// Later we can throw as we add to index → op map. -// -// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the -// inserted stuff would be before the replace range. But, if you -// add tokens in front of a method body '{' and then delete the method -// body, I think the stuff before the '{' you added should disappear too. -// -// Return a map from token index to operation. 
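The rules listed above are enforced by `reduceToSingleOperationPerIndex` below. A sketch of one of them, "for any inserts to same index, combine even if not adjacent", using a stand-in `op` type rather than the vendored operation hierarchy:

```go
package main

import "fmt"

// op stands in for an InsertBeforeOp instruction.
type op struct {
	index int
	text  string
}

// combineInserts folds multiple inserts at one index into a single op,
// so the final index-to-operation map holds at most one entry per index.
func combineInserts(ops []op) map[int]op {
	m := map[int]op{}
	for _, o := range ops {
		if prev, ok := m[o.index]; ok {
			// Matching the vendored loop, the later op's text is placed
			// in front of the earlier one's.
			o.text = o.text + prev.text
		}
		m[o.index] = o
	}
	return m
}

func main() {
	ops := []op{{2, "a"}, {5, "x"}, {2, "b"}}
	fmt.Println(combineInserts(ops)) // map[2:{2 ba} 5:{5 x}]
}
```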
-func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { - // WALK REPLACES - for i := 0; i < len(rewrites); i++ { - op := rewrites[i] - if op == nil { - continue - } - rop, ok := op.(*ReplaceOp) - if !ok { - continue - } - // Wipe prior inserts within range - for j := 0; j < i && j < len(rewrites); j++ { - if iop, ok := rewrites[j].(*InsertBeforeOp); ok { - if iop.index == rop.index { - // E.g., insert before 2, delete 2..2; update replace - // text to include insert before, kill insert - rewrites[iop.instruction_index] = nil - if rop.text != "" { - rop.text = iop.text + rop.text - } else { - rop.text = iop.text - } - } else if iop.index > rop.index && iop.index <= rop.LastIndex { - // delete insert as it's a no-op. - rewrites[iop.instruction_index] = nil - } - } - } - // Drop any prior replaces contained within - for j := 0; j < i && j < len(rewrites); j++ { - if prevop, ok := rewrites[j].(*ReplaceOp); ok { - if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { - // delete replace as it's a no-op. - rewrites[prevop.instruction_index] = nil - continue - } - // throw exception unless disjoint or identical - disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex - // Delete special case of replace (text==null): - // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) - if prevop.text == "" && rop.text == "" && !disjoint { - rewrites[prevop.instruction_index] = nil - rop.index = min(prevop.index, rop.index) - rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) - println("new rop" + rop.String()) //TODO: remove console write, taken from Java version - } else if !disjoint { - panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) - } - } - } - } - // WALK INSERTS - for i := 0; i < len(rewrites); i++ { - op := rewrites[i] - if op == nil { - continue - } - //hack to replicate inheritance in composition - _, iok := rewrites[i].(*InsertBeforeOp) - _, aok := rewrites[i].(*InsertAfterOp) - if !iok && !aok { - continue - } - iop := rewrites[i] - // combine current insert with prior if any at same index - // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic - for j := 0; j < i && j < len(rewrites); j++ { - if nextIop, ok := rewrites[j].(*InsertAfterOp); ok { - if nextIop.index == iop.GetIndex() { - iop.SetText(nextIop.text + iop.GetText()) - rewrites[j] = nil - } - } - if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok { - if prevIop.index == iop.GetIndex() { - iop.SetText(iop.GetText() + prevIop.text) - rewrites[prevIop.instruction_index] = nil - } - } - } - // look for replaces where iop.index is in range; error - for j := 0; j < i && j < len(rewrites); j++ { - if rop, ok := rewrites[j].(*ReplaceOp); ok { - if iop.GetIndex() == rop.index { - rop.text = iop.GetText() + rop.text - rewrites[i] = nil - continue - } - if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex { - panic("insert op " + iop.String() + " within boundaries of previous " + rop.String()) - } - } - } - } - m := map[int]RewriteOperation{} - for i := 0; i < len(rewrites); i++ { - op := rewrites[i] - if op == nil { - continue - } - if _, ok := m[op.GetIndex()]; ok { - panic("should only be one op per index") - } - m[op.GetIndex()] = op - } - return m -} - -/* - Quick fixing Go lack of overloads -*/ - -func max(a, b int) int { - if a > b { - return a - } else { - return b - } -} -func min(a, b int) int { - if a < b { - return a - } else { - return b - } -} +// 
Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Useful for rewriting out a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a string with one of the
+// GetText methods. This is very efficient because you are not moving data
+// around all the time. As the buffer of tokens is converted to strings, the
+// GetText method(s) scan the input token stream and check to see if there is
+// an operation at the current index. If so, the operation is done and then
+// normal string rendering continues on the buffer. This is like having
+// multiple Turing machine instruction streams (programs) operating on a
+// single input tape. :)
+//
+// This rewriter makes no modifications to the token stream. It does not ask
+// the stream to fill itself up nor does it advance the input cursor. The
+// token stream's Index() will return the same value before and after any
+// GetText call.
+//
+// The rewriter only works on tokens that you have in the buffer and ignores
+// the current input cursor. If you are buffering tokens on-demand, calling
+// GetText halfway through the input will only do rewrites for those tokens
+// in the first half of the file.
+//
+// Since the operations are done lazily at GetText-time, operations do not
+// screw up the token index values. That is, an insert operation at token
+// index i does not change the index values for tokens i+1..n-1.
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any
+// changes if there is an error just by removing instructions. For example
+// (in Java, from the upstream documentation):
+//
+//	CharStream input = new ANTLRFileStream("input");
+//	TLexer lex = new TLexer(input);
+//	CommonTokenStream tokens = new CommonTokenStream(lex);
+//	T parser = new T(tokens);
+//	TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+//	parser.startRule();
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+//
+//	Token t, u;
+//	...
+//	rewriter.insertAfter(t, "text to put after t");
+//	rewriter.insertAfter(u, "text after u");
+//	System.out.println(rewriter.getText());
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and
+// use that name again when printing the buffer. This could be useful for
+// generating a C file and also its header file--all from the same buffer:
+//
+//	rewriter.insertAfter("pass1", t, "text to put after t");
+//	rewriter.insertAfter("pass2", u, "text after u");
+//	System.out.println(rewriter.getText("pass1"));
+//	System.out.println(rewriter.getText("pass2"));
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
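The snippets above are Java, inherited from the upstream Javadoc. For orientation, a rough Go sketch of the same flow against this runtime's exported API; `NewMyLexer` is a hypothetical placeholder for a generated lexer and is not part of this file:

```go
package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func main() {
	input := antlr.NewInputStream("int x = 1;")
	lex := NewMyLexer(input) // hypothetical generated lexer, not defined here
	tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
	tokens.Fill() // the rewriter only sees tokens already in the buffer

	rewriter := antlr.NewTokenStreamRewriter(tokens)

	// Edits are queued, not applied; the token stream is never modified.
	rewriter.InsertBeforeDefault(0, "// generated\n")
	rewriter.InsertAfterDefault(2, " /* after token 2 */")
	rewriter.ReplaceDefaultPos(4, "42")

	// Rendering happens here: original tokens plus the queued edits.
	fmt.Println(rewriter.GetTextDefault())
}
```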
+ +const ( + Default_Program_Name = "default" + Program_Init_Size = 100 + Min_Token_Index = 0 +) + +// Define the rewrite operation hierarchy + +type RewriteOperation interface { + // Execute the rewrite operation by possibly adding to the buffer. + // Return the index of the next token to operate on. + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream + SetInstructionIndex(val int) + SetIndex(int) + SetText(string) + SetOpName(string) + SetTokens(TokenStream) +} + +type BaseRewriteOperation struct { + //Current index of rewrites list + instruction_index int + //Token buffer index + index int + //Substitution text + text string + //Actual operation name + op_name string + //Pointer to token steam + tokens TokenStream +} + +func (op *BaseRewriteOperation) GetInstructionIndex() int { + return op.instruction_index +} + +func (op *BaseRewriteOperation) GetIndex() int { + return op.index +} + +func (op *BaseRewriteOperation) GetText() string { + return op.text +} + +func (op *BaseRewriteOperation) GetOpName() string { + return op.op_name +} + +func (op *BaseRewriteOperation) GetTokens() TokenStream { + return op.tokens +} + +func (op *BaseRewriteOperation) SetInstructionIndex(val int) { + op.instruction_index = val +} + +func (op *BaseRewriteOperation) SetIndex(val int) { + op.index = val +} + +func (op *BaseRewriteOperation) SetText(val string) { + op.text = val +} + +func (op *BaseRewriteOperation) SetOpName(val string) { + op.op_name = val +} + +func (op *BaseRewriteOperation) SetTokens(val TokenStream) { + op.tokens = val +} + +func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int { + return op.index +} + +func (op *BaseRewriteOperation) String() string { + return fmt.Sprintf("<%s@%d:\"%s\">", + op.op_name, + op.tokens.Get(op.GetIndex()), + op.text, + ) + +} + +type InsertBeforeOp struct { + BaseRewriteOperation +} + +func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { + return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index, + text: text, + op_name: "InsertBeforeOp", + tokens: stream, + }} +} + +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertBeforeOp) String() string { + return op.BaseRewriteOperation.String() +} + +// Distinguish between insert after/before to do the "insert afters" +// first and then the "insert befores" at same index. Implementation +// of "insert after" is "insert before index+1". + +type InsertAfterOp struct { + BaseRewriteOperation +} + +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { + return &InsertAfterOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index + 1, + text: text, + tokens: stream, + }} +} + +func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertAfterOp) String() string { + return op.BaseRewriteOperation.String() +} + +// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp +// instructions. 
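Before ReplaceOp below, a quick sketch of how the two insert operations just defined behave at render time. This is illustrative only and assumes `tokens` is an already-filled TokenStream with at least four tokens:

```go
import (
	"bytes"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func demoInsertOps(tokens antlr.TokenStream) {
	var buf bytes.Buffer

	// InsertBeforeOp writes its text, then the token at its own index,
	// and returns index+1 as the next token to render.
	before := antlr.NewInsertBeforeOp(2, "<<", tokens)
	next := before.Execute(&buf) // buf: "<<" + token 2's text; next == 3

	// InsertAfterOp is stored at index+1 (here 3), so the render walk
	// emits ">>" when it reaches index 3, i.e. after token 2's text.
	after := antlr.NewInsertAfterOp(2, ">>", tokens)

	_, _ = next, after
}
```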
+type ReplaceOp struct {
+	BaseRewriteOperation
+	LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
+	return &ReplaceOp{
+		BaseRewriteOperation: BaseRewriteOperation{
+			index:   from,
+			text:    text,
+			op_name: "ReplaceOp",
+			tokens:  stream,
+		},
+		LastIndex: to,
+	}
+}
+
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+	if op.text != "" {
+		buffer.WriteString(op.text)
+	}
+	return op.LastIndex + 1
+}
+
+func (op *ReplaceOp) String() string {
+	if op.text == "" {
+		return fmt.Sprintf("<DeleteOp@%v..%v>",
+			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+	}
+	return fmt.Sprintf("<ReplaceOp@%v..%v:\"%v\">",
+		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+type TokenStreamRewriter struct {
+	// Our source stream
+	tokens TokenStream
+	// You may have multiple, named streams of rewrite operations.
+	// I'm calling these things "programs."
+	// Maps String (name) → rewrite (List)
+	programs                   map[string][]RewriteOperation
+	last_rewrite_token_indexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
+	return &TokenStreamRewriter{
+		tokens: tokens,
+		programs: map[string][]RewriteOperation{
+			Default_Program_Name: make([]RewriteOperation, 0, Program_Init_Size),
+		},
+		last_rewrite_token_indexes: map[string]int{},
+	}
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
+	return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int) {
+	is, ok := tsr.programs[program_name]
+	if ok {
+		tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
+	}
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int) {
+	tsr.Rollback(Default_Program_Name, instruction_index)
+}
+
+// Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(program_name string) {
+	tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that because lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
+	tsr.DeleteProgram(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string) {
+	// to insert after, just insert before next index (even if past end)
+	var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+	rewrites := tsr.GetProgram(program_name)
+	op.SetInstructionIndex(len(rewrites))
+	tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
+	tsr.InsertAfter(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string) {
+	tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBefore(program_name string, index int, text string) {
+	var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+	rewrites := tsr.GetProgram(program_name)
+	op.SetInstructionIndex(len(rewrites))
+	tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
+	tsr.InsertBefore(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string, token Token, text string) {
+	tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(program_name
string, from, to int, text string) { + if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { + panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", + from, to, tsr.tokens.Size())) + } + var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { + tsr.Replace(Default_Program_Name, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { + tsr.ReplaceDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) ReplaceToken(program_name string, from, to Token, text string) { + tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { + tsr.ReplaceToken(Default_Program_Name, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { + tsr.ReplaceTokenDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) Delete(program_name string, from, to int) { + tsr.Replace(program_name, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { + tsr.Delete(Default_Program_Name, from, to) +} + +func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { + tsr.DeleteDefault(index, index) +} + +func (tsr *TokenStreamRewriter) DeleteToken(program_name string, from, to Token) { + tsr.ReplaceToken(program_name, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { + tsr.DeleteToken(Default_Program_Name, from, to) +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(program_name string) int { + i, ok := tsr.last_rewrite_token_indexes[program_name] + if !ok { + return -1 + } + return i +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { + return tsr.GetLastRewriteTokenIndex(Default_Program_Name) +} + +func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(program_name string, i int) { + tsr.last_rewrite_token_indexes[program_name] = i +} + +func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { + is := make([]RewriteOperation, 0, Program_Init_Size) + tsr.programs[name] = is + return is +} + +func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) { + is := tsr.GetProgram(name) + is = append(is, op) + tsr.programs[name] = is +} + +func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { + is, ok := tsr.programs[name] + if !ok { + is = tsr.InitializeProgram(name) + } + return is +} + +// Return the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter) GetTextDefault() string { + return tsr.GetText( + Default_Program_Name, + NewInterval(0, tsr.tokens.Size()-1)) +} + +// Return the text from the original tokens altered per the +// instructions given to this rewriter. 
+func (tsr *TokenStreamRewriter) GetText(program_name string, interval *Interval) string { + rewrites := tsr.programs[program_name] + start := interval.Start + stop := interval.Stop + // ensure start/end are in range + stop = min(stop, tsr.tokens.Size()-1) + start = max(start, 0) + if rewrites == nil || len(rewrites) == 0 { + return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute + } + buf := bytes.Buffer{} + // First, optimize instruction stream + indexToOp := reduceToSingleOperationPerIndex(rewrites) + // Walk buffer, executing instructions and emitting tokens + for i := start; i <= stop && i < tsr.tokens.Size(); { + op := indexToOp[i] + delete(indexToOp, i) // remove so any left have index size-1 + t := tsr.tokens.Get(i) + if op == nil { + // no operation at that index, just dump token + if t.GetTokenType() != TokenEOF { + buf.WriteString(t.GetText()) + } + i++ // move to next token + } else { + i = op.Execute(&buf) // execute operation and skip + } + } + // include stuff after end if it's last index in buffer + // So, if they did an insertAfter(lastValidIndex, "foo"), include + // foo if end==lastValidIndex. + if stop == tsr.tokens.Size()-1 { + // Scan any remaining operations after last token + // should be included (they will be inserts). + for _, op := range indexToOp { + if op.GetIndex() >= tsr.tokens.Size()-1 { + buf.WriteString(op.GetText()) + } + } + } + return buf.String() +} + +// We need to combine operations and report invalid operations (like +// overlapping replaces that are not completed nested). Inserts to +// same index need to be combined etc... Here are the cases: +// +// I.i.u I.j.v leave alone, nonoverlapping +// I.i.u I.i.v combine: Iivu +// +// R.i-j.u R.x-y.v | i-j in x-y delete first R +// R.i-j.u R.i-j.v delete first R +// R.i-j.u R.x-y.v | x-y in i-j ERROR +// R.i-j.u R.x-y.v | boundaries overlap ERROR +// +// Delete special case of replace (text==null): +// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) +// +// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before +// we're not deleting i) +// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping +// R.x-y.v I.i.u | i in x-y ERROR +// R.x-y.v I.x.u R.x-y.uv (combine, delete I) +// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping +// +// I.i.u = insert u before op @ index i +// R.x-y.u = replace x-y indexed tokens with u +// +// First we need to examine replaces. For any replace op: +// +// 1. wipe out any insertions before op within that range. +// 2. Drop any replace op before that is contained completely within +// that range. +// 3. Throw exception upon boundary overlap with any previous replace. +// +// Then we can deal with inserts: +// +// 1. for any inserts to same index, combine even if not adjacent. +// 2. for any prior replace with same left boundary, combine this +// insert with replace and delete this replace. +// 3. throw exception if index in same range as previous replace +// +// Don't actually delete; make op null in list. Easier to walk list. +// Later we can throw as we add to index → op map. +// +// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the +// inserted stuff would be before the replace range. But, if you +// add tokens in front of a method body '{' and then delete the method +// body, I think the stuff before the '{' you added should disappear too. +// +// Return a map from token index to operation. 
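As a concrete illustration of the insert-vs-replace rules above, a small sketch with hypothetical token indexes on the default program; the function that implements the reduction follows:

```go
import "github.com/antlr/antlr4/runtime/Go/antlr/v4"

func demoReduce(tokens antlr.TokenStream) string {
	r := antlr.NewTokenStreamRewriter(tokens)
	r.InsertBeforeDefault(2, "x") // I.2.x
	r.ReplaceDefault(2, 4, "y")   // R.2-4.y
	// At GetText time, reduceToSingleOperationPerIndex folds the insert
	// into the replace: the InsertBeforeOp is nulled out and the
	// ReplaceOp's text becomes "xy", leaving one operation at index 2.
	return r.GetTextDefault()
}
```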
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { + // WALK REPLACES + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + rop, ok := op.(*ReplaceOp) + if !ok { + continue + } + // Wipe prior inserts within range + for j := 0; j < i && j < len(rewrites); j++ { + if iop, ok := rewrites[j].(*InsertBeforeOp); ok { + if iop.index == rop.index { + // E.g., insert before 2, delete 2..2; update replace + // text to include insert before, kill insert + rewrites[iop.instruction_index] = nil + if rop.text != "" { + rop.text = iop.text + rop.text + } else { + rop.text = iop.text + } + } else if iop.index > rop.index && iop.index <= rop.LastIndex { + // delete insert as it's a no-op. + rewrites[iop.instruction_index] = nil + } + } + } + // Drop any prior replaces contained within + for j := 0; j < i && j < len(rewrites); j++ { + if prevop, ok := rewrites[j].(*ReplaceOp); ok { + if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { + // delete replace as it's a no-op. + rewrites[prevop.instruction_index] = nil + continue + } + // throw exception unless disjoint or identical + disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex + // Delete special case of replace (text==null): + // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) + if prevop.text == "" && rop.text == "" && !disjoint { + rewrites[prevop.instruction_index] = nil + rop.index = min(prevop.index, rop.index) + rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) + println("new rop" + rop.String()) //TODO: remove console write, taken from Java version + } else if !disjoint { + panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) + } + } + } + } + // WALK INSERTS + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + //hack to replicate inheritance in composition + _, iok := rewrites[i].(*InsertBeforeOp) + _, aok := rewrites[i].(*InsertAfterOp) + if !iok && !aok { + continue + } + iop := rewrites[i] + // combine current insert with prior if any at same index + // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic + for j := 0; j < i && j < len(rewrites); j++ { + if nextIop, ok := rewrites[j].(*InsertAfterOp); ok { + if nextIop.index == iop.GetIndex() { + iop.SetText(nextIop.text + iop.GetText()) + rewrites[j] = nil + } + } + if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok { + if prevIop.index == iop.GetIndex() { + iop.SetText(iop.GetText() + prevIop.text) + rewrites[prevIop.instruction_index] = nil + } + } + } + // look for replaces where iop.index is in range; error + for j := 0; j < i && j < len(rewrites); j++ { + if rop, ok := rewrites[j].(*ReplaceOp); ok { + if iop.GetIndex() == rop.index { + rop.text = iop.GetText() + rop.text + rewrites[i] = nil + continue + } + if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex { + panic("insert op " + iop.String() + " within boundaries of previous " + rop.String()) + } + } + } + } + m := map[int]RewriteOperation{} + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + if _, ok := m[op.GetIndex()]; ok { + panic("should only be one op per index") + } + m[op.GetIndex()] = op + } + return m +} + +/* + Quick fixing Go lack of overloads +*/ + +func max(a, b int) int { + if a > b { + return a + } else { + return b + } +} +func min(a, b int) int { + if a < b { + return a + } else { + return b + } +} diff 
--git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go index 7b663bf849..c5036fd23a 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trace_listener.go @@ -1,32 +1,32 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "fmt" - -type TraceListener struct { - parser *BaseParser -} - -func NewTraceListener(parser *BaseParser) *TraceListener { - tl := new(TraceListener) - tl.parser = parser - return tl -} - -func (t *TraceListener) VisitErrorNode(_ ErrorNode) { -} - -func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) { - fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) -} - -func (t *TraceListener) VisitTerminal(node TerminalNode) { - fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()]) -} - -func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) { - fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "fmt" + +type TraceListener struct { + parser *BaseParser +} + +func NewTraceListener(parser *BaseParser) *TraceListener { + tl := new(TraceListener) + tl.parser = parser + return tl +} + +func (t *TraceListener) VisitErrorNode(_ ErrorNode) { +} + +func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) { + fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) +} + +func (t *TraceListener) VisitTerminal(node TerminalNode) { + fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()]) +} + +func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) { + fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go index 36be4f7331..c4d4fa3453 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/transition.go @@ -1,428 +1,428 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import ( - "fmt" - "strconv" - "strings" -) - -// atom, set, epsilon, action, predicate, rule transitions. -// -//

This is a one way link. It emanates from a state (usually via a list of
-// transitions) and has a target state.
-//
-// Since we never have to change the ATN transitions once we construct it,
-// we can fix these transitions as specific classes. The DFA transitions,
-// on the other hand, need to update their labels as edges are added to the
-// states. We'll use the term Edge for the DFA to distinguish them from
-// ATN transitions.

- -type Transition interface { - getTarget() ATNState - setTarget(ATNState) - getIsEpsilon() bool - getLabel() *IntervalSet - getSerializationType() int - Matches(int, int, int) bool -} - -type BaseTransition struct { - target ATNState - isEpsilon bool - label int - intervalSet *IntervalSet - serializationType int -} - -func NewBaseTransition(target ATNState) *BaseTransition { - - if target == nil { - panic("target cannot be nil.") - } - - t := new(BaseTransition) - - t.target = target - // Are we epsilon, action, sempred? - t.isEpsilon = false - t.intervalSet = nil - - return t -} - -func (t *BaseTransition) getTarget() ATNState { - return t.target -} - -func (t *BaseTransition) setTarget(s ATNState) { - t.target = s -} - -func (t *BaseTransition) getIsEpsilon() bool { - return t.isEpsilon -} - -func (t *BaseTransition) getLabel() *IntervalSet { - return t.intervalSet -} - -func (t *BaseTransition) getSerializationType() int { - return t.serializationType -} - -func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - panic("Not implemented") -} - -const ( - TransitionEPSILON = 1 - TransitionRANGE = 2 - TransitionRULE = 3 - TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? - TransitionATOM = 5 - TransitionACTION = 6 - TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 - TransitionNOTSET = 8 - TransitionWILDCARD = 9 - TransitionPRECEDENCE = 10 -) - -var TransitionserializationNames = []string{ - "INVALID", - "EPSILON", - "RANGE", - "RULE", - "PREDICATE", - "ATOM", - "ACTION", - "SET", - "NOT_SET", - "WILDCARD", - "PRECEDENCE", -} - -//var TransitionserializationTypes struct { -// EpsilonTransition int -// RangeTransition int -// RuleTransition int -// PredicateTransition int -// AtomTransition int -// ActionTransition int -// SetTransition int -// NotSetTransition int -// WildcardTransition int -// PrecedencePredicateTransition int -//}{ -// TransitionEPSILON, -// TransitionRANGE, -// TransitionRULE, -// TransitionPREDICATE, -// TransitionATOM, -// TransitionACTION, -// TransitionSET, -// TransitionNOTSET, -// TransitionWILDCARD, -// TransitionPRECEDENCE -//} - -// TODO: make all transitions sets? no, should remove set edges -type AtomTransition struct { - *BaseTransition -} - -func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { - - t := new(AtomTransition) - t.BaseTransition = NewBaseTransition(target) - - t.label = intervalSet // The token type or character value or, signifies special intervalSet. 
- t.intervalSet = t.makeLabel() - t.serializationType = TransitionATOM - - return t -} - -func (t *AtomTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addOne(t.label) - return s -} - -func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.label == symbol -} - -func (t *AtomTransition) String() string { - return strconv.Itoa(t.label) -} - -type RuleTransition struct { - *BaseTransition - - followState ATNState - ruleIndex, precedence int -} - -func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { - - t := new(RuleTransition) - t.BaseTransition = NewBaseTransition(ruleStart) - - t.ruleIndex = ruleIndex - t.precedence = precedence - t.followState = followState - t.serializationType = TransitionRULE - t.isEpsilon = true - - return t -} - -func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -type EpsilonTransition struct { - *BaseTransition - - outermostPrecedenceReturn int -} - -func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { - - t := new(EpsilonTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionEPSILON - t.isEpsilon = true - t.outermostPrecedenceReturn = outermostPrecedenceReturn - return t -} - -func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *EpsilonTransition) String() string { - return "epsilon" -} - -type RangeTransition struct { - *BaseTransition - - start, stop int -} - -func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { - - t := new(RangeTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionRANGE - t.start = start - t.stop = stop - t.intervalSet = t.makeLabel() - return t -} - -func (t *RangeTransition) makeLabel() *IntervalSet { - s := NewIntervalSet() - s.addRange(t.start, t.stop) - return s -} - -func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= t.start && symbol <= t.stop -} - -func (t *RangeTransition) String() string { - var sb strings.Builder - sb.WriteByte('\'') - sb.WriteRune(rune(t.start)) - sb.WriteString("'..'") - sb.WriteRune(rune(t.stop)) - sb.WriteByte('\'') - return sb.String() -} - -type AbstractPredicateTransition interface { - Transition - IAbstractPredicateTransitionFoo() -} - -type BaseAbstractPredicateTransition struct { - *BaseTransition -} - -func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { - - t := new(BaseAbstractPredicateTransition) - t.BaseTransition = NewBaseTransition(target) - - return t -} - -func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} - -type PredicateTransition struct { - *BaseAbstractPredicateTransition - - isCtxDependent bool - ruleIndex, predIndex int -} - -func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { - - t := new(PredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPREDICATE - t.ruleIndex = ruleIndex - t.predIndex = predIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PredicateTransition) getPredicate() *Predicate { - return 
NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) -} - -func (t *PredicateTransition) String() string { - return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) -} - -type ActionTransition struct { - *BaseTransition - - isCtxDependent bool - ruleIndex, actionIndex, predIndex int -} - -func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { - - t := new(ActionTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionACTION - t.ruleIndex = ruleIndex - t.actionIndex = actionIndex - t.isCtxDependent = isCtxDependent // e.g., $i ref in pred - t.isEpsilon = true - return t -} - -func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *ActionTransition) String() string { - return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) -} - -type SetTransition struct { - *BaseTransition -} - -func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { - - t := new(SetTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionSET - if set != nil { - t.intervalSet = set - } else { - t.intervalSet = NewIntervalSet() - t.intervalSet.addOne(TokenInvalidType) - } - - return t -} - -func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return t.intervalSet.contains(symbol) -} - -func (t *SetTransition) String() string { - return t.intervalSet.String() -} - -type NotSetTransition struct { - *SetTransition -} - -func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { - - t := new(NotSetTransition) - - t.SetTransition = NewSetTransition(target, set) - - t.serializationType = TransitionNOTSET - - return t -} - -func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) -} - -func (t *NotSetTransition) String() string { - return "~" + t.intervalSet.String() -} - -type WildcardTransition struct { - *BaseTransition -} - -func NewWildcardTransition(target ATNState) *WildcardTransition { - - t := new(WildcardTransition) - t.BaseTransition = NewBaseTransition(target) - - t.serializationType = TransitionWILDCARD - return t -} - -func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return symbol >= minVocabSymbol && symbol <= maxVocabSymbol -} - -func (t *WildcardTransition) String() string { - return "." -} - -type PrecedencePredicateTransition struct { - *BaseAbstractPredicateTransition - - precedence int -} - -func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { - - t := new(PrecedencePredicateTransition) - t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) - - t.serializationType = TransitionPRECEDENCE - t.precedence = precedence - t.isEpsilon = true - - return t -} - -func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { - return false -} - -func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { - return NewPrecedencePredicate(t.precedence) -} - -func (t *PrecedencePredicateTransition) String() string { - return fmt.Sprint(t.precedence) + " >= _p" -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +// atom, set, epsilon, action, predicate, rule transitions. +// +//

This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct it,
+// we can fix these transitions as specific classes. The DFA transitions,
+// on the other hand, need to update their labels as edges are added to the
+// states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.

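To make the label semantics concrete before the interface and its implementations, a small sketch of Matches for two of the transition types defined below. It assumes NewBasicState, from elsewhere in this runtime, gives a usable placeholder target state:

```go
import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

func demoMatches() {
	target := antlr.NewBasicState() // placeholder target state

	// AtomTransition matches exactly one symbol: its label.
	atom := antlr.NewAtomTransition(target, 'x')
	fmt.Println(atom.Matches('x', 0, 127)) // true
	fmt.Println(atom.Matches('y', 0, 127)) // false

	// RangeTransition matches any symbol in start..stop, inclusive.
	rng := antlr.NewRangeTransition(target, 'a', 'z')
	fmt.Println(rng.Matches('m', 0, 127)) // true
}
```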
+ +type Transition interface { + getTarget() ATNState + setTarget(ATNState) + getIsEpsilon() bool + getLabel() *IntervalSet + getSerializationType() int + Matches(int, int, int) bool +} + +type BaseTransition struct { + target ATNState + isEpsilon bool + label int + intervalSet *IntervalSet + serializationType int +} + +func NewBaseTransition(target ATNState) *BaseTransition { + + if target == nil { + panic("target cannot be nil.") + } + + t := new(BaseTransition) + + t.target = target + // Are we epsilon, action, sempred? + t.isEpsilon = false + t.intervalSet = nil + + return t +} + +func (t *BaseTransition) getTarget() ATNState { + return t.target +} + +func (t *BaseTransition) setTarget(s ATNState) { + t.target = s +} + +func (t *BaseTransition) getIsEpsilon() bool { + return t.isEpsilon +} + +func (t *BaseTransition) getLabel() *IntervalSet { + return t.intervalSet +} + +func (t *BaseTransition) getSerializationType() int { + return t.serializationType +} + +func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + panic("Not implemented") +} + +const ( + TransitionEPSILON = 1 + TransitionRANGE = 2 + TransitionRULE = 3 + TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? + TransitionATOM = 5 + TransitionACTION = 6 + TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 + TransitionNOTSET = 8 + TransitionWILDCARD = 9 + TransitionPRECEDENCE = 10 +) + +var TransitionserializationNames = []string{ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE", +} + +//var TransitionserializationTypes struct { +// EpsilonTransition int +// RangeTransition int +// RuleTransition int +// PredicateTransition int +// AtomTransition int +// ActionTransition int +// SetTransition int +// NotSetTransition int +// WildcardTransition int +// PrecedencePredicateTransition int +//}{ +// TransitionEPSILON, +// TransitionRANGE, +// TransitionRULE, +// TransitionPREDICATE, +// TransitionATOM, +// TransitionACTION, +// TransitionSET, +// TransitionNOTSET, +// TransitionWILDCARD, +// TransitionPRECEDENCE +//} + +// TODO: make all transitions sets? no, should remove set edges +type AtomTransition struct { + *BaseTransition +} + +func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { + + t := new(AtomTransition) + t.BaseTransition = NewBaseTransition(target) + + t.label = intervalSet // The token type or character value or, signifies special intervalSet. 
+ t.intervalSet = t.makeLabel() + t.serializationType = TransitionATOM + + return t +} + +func (t *AtomTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addOne(t.label) + return s +} + +func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return t.label == symbol +} + +func (t *AtomTransition) String() string { + return strconv.Itoa(t.label) +} + +type RuleTransition struct { + *BaseTransition + + followState ATNState + ruleIndex, precedence int +} + +func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { + + t := new(RuleTransition) + t.BaseTransition = NewBaseTransition(ruleStart) + + t.ruleIndex = ruleIndex + t.precedence = precedence + t.followState = followState + t.serializationType = TransitionRULE + t.isEpsilon = true + + return t +} + +func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +type EpsilonTransition struct { + *BaseTransition + + outermostPrecedenceReturn int +} + +func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { + + t := new(EpsilonTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionEPSILON + t.isEpsilon = true + t.outermostPrecedenceReturn = outermostPrecedenceReturn + return t +} + +func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *EpsilonTransition) String() string { + return "epsilon" +} + +type RangeTransition struct { + *BaseTransition + + start, stop int +} + +func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { + + t := new(RangeTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionRANGE + t.start = start + t.stop = stop + t.intervalSet = t.makeLabel() + return t +} + +func (t *RangeTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addRange(t.start, t.stop) + return s +} + +func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= t.start && symbol <= t.stop +} + +func (t *RangeTransition) String() string { + var sb strings.Builder + sb.WriteByte('\'') + sb.WriteRune(rune(t.start)) + sb.WriteString("'..'") + sb.WriteRune(rune(t.stop)) + sb.WriteByte('\'') + return sb.String() +} + +type AbstractPredicateTransition interface { + Transition + IAbstractPredicateTransitionFoo() +} + +type BaseAbstractPredicateTransition struct { + *BaseTransition +} + +func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { + + t := new(BaseAbstractPredicateTransition) + t.BaseTransition = NewBaseTransition(target) + + return t +} + +func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} + +type PredicateTransition struct { + *BaseAbstractPredicateTransition + + isCtxDependent bool + ruleIndex, predIndex int +} + +func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { + + t := new(PredicateTransition) + t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) + + t.serializationType = TransitionPREDICATE + t.ruleIndex = ruleIndex + t.predIndex = predIndex + t.isCtxDependent = isCtxDependent // e.g., $i ref in pred + t.isEpsilon = true + return t +} + +func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *PredicateTransition) getPredicate() *Predicate { + return 
NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) +} + +func (t *PredicateTransition) String() string { + return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) +} + +type ActionTransition struct { + *BaseTransition + + isCtxDependent bool + ruleIndex, actionIndex, predIndex int +} + +func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { + + t := new(ActionTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionACTION + t.ruleIndex = ruleIndex + t.actionIndex = actionIndex + t.isCtxDependent = isCtxDependent // e.g., $i ref in pred + t.isEpsilon = true + return t +} + +func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *ActionTransition) String() string { + return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) +} + +type SetTransition struct { + *BaseTransition +} + +func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { + + t := new(SetTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionSET + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + + return t +} + +func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return t.intervalSet.contains(symbol) +} + +func (t *SetTransition) String() string { + return t.intervalSet.String() +} + +type NotSetTransition struct { + *SetTransition +} + +func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { + + t := new(NotSetTransition) + + t.SetTransition = NewSetTransition(target, set) + + t.serializationType = TransitionNOTSET + + return t +} + +func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) +} + +func (t *NotSetTransition) String() string { + return "~" + t.intervalSet.String() +} + +type WildcardTransition struct { + *BaseTransition +} + +func NewWildcardTransition(target ATNState) *WildcardTransition { + + t := new(WildcardTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionWILDCARD + return t +} + +func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol +} + +func (t *WildcardTransition) String() string { + return "." 
+} + +type PrecedencePredicateTransition struct { + *BaseAbstractPredicateTransition + + precedence int +} + +func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { + + t := new(PrecedencePredicateTransition) + t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) + + t.serializationType = TransitionPRECEDENCE + t.precedence = precedence + t.isEpsilon = true + + return t +} + +func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { + return NewPrecedencePredicate(t.precedence) +} + +func (t *PrecedencePredicateTransition) String() string { + return fmt.Sprint(t.precedence) + " >= _p" +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go index 85b4f137b5..7b6ace613d 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/tree.go @@ -1,253 +1,253 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -// The basic notion of a tree has a parent, a payload, and a list of children. -// It is the most abstract interface for all the trees used by ANTLR. -/// - -var TreeInvalidInterval = NewInterval(-1, -2) - -type Tree interface { - GetParent() Tree - SetParent(Tree) - GetPayload() interface{} - GetChild(i int) Tree - GetChildCount() int - GetChildren() []Tree -} - -type SyntaxTree interface { - Tree - - GetSourceInterval() *Interval -} - -type ParseTree interface { - SyntaxTree - - Accept(Visitor ParseTreeVisitor) interface{} - GetText() string - - ToStringTree([]string, Recognizer) string -} - -type RuleNode interface { - ParseTree - - GetRuleContext() RuleContext - GetBaseRuleContext() *BaseRuleContext -} - -type TerminalNode interface { - ParseTree - - GetSymbol() Token -} - -type ErrorNode interface { - TerminalNode - - errorNode() -} - -type ParseTreeVisitor interface { - Visit(tree ParseTree) interface{} - VisitChildren(node RuleNode) interface{} - VisitTerminal(node TerminalNode) interface{} - VisitErrorNode(node ErrorNode) interface{} -} - -type BaseParseTreeVisitor struct{} - -var _ ParseTreeVisitor = &BaseParseTreeVisitor{} - -func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } -func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } -func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } - -// TODO -//func (this ParseTreeVisitor) Visit(ctx) { -// if (Utils.isArray(ctx)) { -// self := this -// return ctx.map(function(child) { return VisitAtom(self, child)}) -// } else { -// return VisitAtom(this, ctx) -// } -//} -// -//func VisitAtom(Visitor, ctx) { -// if (ctx.parser == nil) { //is terminal -// return -// } -// -// name := ctx.parser.ruleNames[ctx.ruleIndex] -// funcName := "Visit" + Utils.titleCase(name) -// -// return Visitor[funcName](ctx) -//} - -type ParseTreeListener interface { - VisitTerminal(node TerminalNode) - VisitErrorNode(node ErrorNode) - EnterEveryRule(ctx ParserRuleContext) - ExitEveryRule(ctx ParserRuleContext) -} - -type BaseParseTreeListener struct{} - -var 
_ ParseTreeListener = &BaseParseTreeListener{}
-
-func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
-func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
-func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
-func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
-
-type TerminalNodeImpl struct {
-	parentCtx RuleContext
-
-	symbol Token
-}
-
-var _ TerminalNode = &TerminalNodeImpl{}
-
-func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
-	tn := new(TerminalNodeImpl)
-
-	tn.parentCtx = nil
-	tn.symbol = symbol
-
-	return tn
-}
-
-func (t *TerminalNodeImpl) GetChild(i int) Tree {
-	return nil
-}
-
-func (t *TerminalNodeImpl) GetChildren() []Tree {
-	return nil
-}
-
-func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
-	panic("Cannot set children on terminal node")
-}
-
-func (t *TerminalNodeImpl) GetSymbol() Token {
-	return t.symbol
-}
-
-func (t *TerminalNodeImpl) GetParent() Tree {
-	return t.parentCtx
-}
-
-func (t *TerminalNodeImpl) SetParent(tree Tree) {
-	t.parentCtx = tree.(RuleContext)
-}
-
-func (t *TerminalNodeImpl) GetPayload() interface{} {
-	return t.symbol
-}
-
-func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
-	if t.symbol == nil {
-		return TreeInvalidInterval
-	}
-	tokenIndex := t.symbol.GetTokenIndex()
-	return NewInterval(tokenIndex, tokenIndex)
-}
-
-func (t *TerminalNodeImpl) GetChildCount() int {
-	return 0
-}
-
-func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
-	return v.VisitTerminal(t)
-}
-
-func (t *TerminalNodeImpl) GetText() string {
-	return t.symbol.GetText()
-}
-
-func (t *TerminalNodeImpl) String() string {
-	if t.symbol.GetTokenType() == TokenEOF {
-		return "<EOF>"
-	}
-
-	return t.symbol.GetText()
-}
-
-func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
-	return t.String()
-}
-
-// Represents a token that was consumed during reSynchronization
-// rather than during a valid Match operation. For example,
-// we will create this kind of a node during single token insertion
-// and deletion as well as during "consume until error recovery set"
-// upon no viable alternative exceptions.
-
-type ErrorNodeImpl struct {
-	*TerminalNodeImpl
-}
-
-var _ ErrorNode = &ErrorNodeImpl{}
-
-func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
-	en := new(ErrorNodeImpl)
-	en.TerminalNodeImpl = NewTerminalNodeImpl(token)
-	return en
-}
-
-func (e *ErrorNodeImpl) errorNode() {}
-
-func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
-	return v.VisitErrorNode(e)
-}
-
-type ParseTreeWalker struct {
-}
-
-func NewParseTreeWalker() *ParseTreeWalker {
-	return new(ParseTreeWalker)
-}
-
-// Performs a walk on the given parse tree starting at the root and going down recursively
-// with depth-first search. On each node, EnterRule is called before
-// recursively walking down into child nodes, then
-// ExitRule is called after the recursive call to wind up.
-func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { - switch tt := t.(type) { - case ErrorNode: - listener.VisitErrorNode(tt) - case TerminalNode: - listener.VisitTerminal(tt) - default: - p.EnterRule(listener, t.(RuleNode)) - for i := 0; i < t.GetChildCount(); i++ { - child := t.GetChild(i) - p.Walk(listener, child) - } - p.ExitRule(listener, t.(RuleNode)) - } -} - -// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} -// then by triggering the event specific to the given parse tree node -func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - listener.EnterEveryRule(ctx) - ctx.EnterRule(listener) -} - -// Exits a grammar rule by first triggering the event specific to the given parse tree node -// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} -func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { - ctx := r.GetRuleContext().(ParserRuleContext) - ctx.ExitRule(listener) - listener.ExitEveryRule(ctx) -} - -var ParseTreeWalkerDefault = NewParseTreeWalker() +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The basic notion of a tree has a parent, a payload, and a list of children. +// It is the most abstract interface for all the trees used by ANTLR. +/// + +var TreeInvalidInterval = NewInterval(-1, -2) + +type Tree interface { + GetParent() Tree + SetParent(Tree) + GetPayload() interface{} + GetChild(i int) Tree + GetChildCount() int + GetChildren() []Tree +} + +type SyntaxTree interface { + Tree + + GetSourceInterval() *Interval +} + +type ParseTree interface { + SyntaxTree + + Accept(Visitor ParseTreeVisitor) interface{} + GetText() string + + ToStringTree([]string, Recognizer) string +} + +type RuleNode interface { + ParseTree + + GetRuleContext() RuleContext + GetBaseRuleContext() *BaseRuleContext +} + +type TerminalNode interface { + ParseTree + + GetSymbol() Token +} + +type ErrorNode interface { + TerminalNode + + errorNode() +} + +type ParseTreeVisitor interface { + Visit(tree ParseTree) interface{} + VisitChildren(node RuleNode) interface{} + VisitTerminal(node TerminalNode) interface{} + VisitErrorNode(node ErrorNode) interface{} +} + +type BaseParseTreeVisitor struct{} + +var _ ParseTreeVisitor = &BaseParseTreeVisitor{} + +func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } +func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } + +// TODO +//func (this ParseTreeVisitor) Visit(ctx) { +// if (Utils.isArray(ctx)) { +// self := this +// return ctx.map(function(child) { return VisitAtom(self, child)}) +// } else { +// return VisitAtom(this, ctx) +// } +//} +// +//func VisitAtom(Visitor, ctx) { +// if (ctx.parser == nil) { //is terminal +// return +// } +// +// name := ctx.parser.ruleNames[ctx.ruleIndex] +// funcName := "Visit" + Utils.titleCase(name) +// +// return Visitor[funcName](ctx) +//} + +type ParseTreeListener interface { + VisitTerminal(node TerminalNode) + VisitErrorNode(node ErrorNode) + EnterEveryRule(ctx ParserRuleContext) + ExitEveryRule(ctx ParserRuleContext) +} + 
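Before the no-op base implementation below, a sketch of how this interface is typically consumed: embed BaseParseTreeListener so only the callbacks you care about need overriding, then drive the listener with ParseTreeWalkerDefault. The tree value is assumed to come from a successful parse:

```go
import "github.com/antlr/antlr4/runtime/Go/antlr/v4"

// tokenCounter counts terminal nodes anywhere in a parse tree.
type tokenCounter struct {
	antlr.BaseParseTreeListener // no-op defaults for the other callbacks
	count int
}

func (c *tokenCounter) VisitTerminal(node antlr.TerminalNode) { c.count++ }

func countTokens(tree antlr.Tree) int {
	c := &tokenCounter{}
	antlr.ParseTreeWalkerDefault.Walk(c, tree)
	return c.count
}
```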
+type BaseParseTreeListener struct{}
+
+var _ ParseTreeListener = &BaseParseTreeListener{}
+
+func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
+func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
+func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
+
+type TerminalNodeImpl struct {
+	parentCtx RuleContext
+
+	symbol Token
+}
+
+var _ TerminalNode = &TerminalNodeImpl{}
+
+func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
+	tn := new(TerminalNodeImpl)
+
+	tn.parentCtx = nil
+	tn.symbol = symbol
+
+	return tn
+}
+
+func (t *TerminalNodeImpl) GetChild(i int) Tree {
+	return nil
+}
+
+func (t *TerminalNodeImpl) GetChildren() []Tree {
+	return nil
+}
+
+func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
+	panic("Cannot set children on terminal node")
+}
+
+func (t *TerminalNodeImpl) GetSymbol() Token {
+	return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetParent() Tree {
+	return t.parentCtx
+}
+
+func (t *TerminalNodeImpl) SetParent(tree Tree) {
+	t.parentCtx = tree.(RuleContext)
+}
+
+func (t *TerminalNodeImpl) GetPayload() interface{} {
+	return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
+	if t.symbol == nil {
+		return TreeInvalidInterval
+	}
+	tokenIndex := t.symbol.GetTokenIndex()
+	return NewInterval(tokenIndex, tokenIndex)
+}
+
+func (t *TerminalNodeImpl) GetChildCount() int {
+	return 0
+}
+
+func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+	return v.VisitTerminal(t)
+}
+
+func (t *TerminalNodeImpl) GetText() string {
+	return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) String() string {
+	if t.symbol.GetTokenType() == TokenEOF {
+		return "<EOF>"
+	}
+
+	return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
+	return t.String()
+}
+
+// Represents a token that was consumed during reSynchronization
+// rather than during a valid Match operation. For example,
+// we will create this kind of a node during single token insertion
+// and deletion as well as during "consume until error recovery set"
+// upon no viable alternative exceptions.
+
+type ErrorNodeImpl struct {
+	*TerminalNodeImpl
+}
+
+var _ ErrorNode = &ErrorNodeImpl{}
+
+func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
+	en := new(ErrorNodeImpl)
+	en.TerminalNodeImpl = NewTerminalNodeImpl(token)
+	return en
+}
+
+func (e *ErrorNodeImpl) errorNode() {}
+
+func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+	return v.VisitErrorNode(e)
+}
+
+type ParseTreeWalker struct {
+}
+
+func NewParseTreeWalker() *ParseTreeWalker {
+	return new(ParseTreeWalker)
+}
+
+// Performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search. On each node, EnterRule is called before
+// recursively walking down into child nodes, then
+// ExitRule is called after the recursive call to wind up.
+func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + switch tt := t.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + p.EnterRule(listener, t.(RuleNode)) + for i := 0; i < t.GetChildCount(); i++ { + child := t.GetChild(i) + p.Walk(listener, child) + } + p.ExitRule(listener, t.(RuleNode)) + } +} + +// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} +// then by triggering the event specific to the given parse tree node +func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) +} + +// Exits a grammar rule by first triggering the event specific to the given parse tree node +// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} +func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) +} + +var ParseTreeWalkerDefault = NewParseTreeWalker() diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go index d7dbb03228..9304730ae0 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/trees.go @@ -1,138 +1,138 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. - -package antlr - -import "fmt" - -/** A set of utility routines useful for all kinds of ANTLR trees. */ - -// Print out a whole tree in LISP form. {@link //getNodeText} is used on the -// -// node payloads to get the text for the nodes. Detect -// parse trees and extract data appropriately. -func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { - - if recog != nil { - ruleNames = recog.GetRuleNames() - } - - s := TreesGetNodeText(tree, ruleNames, nil) - - s = EscapeWhitespace(s, false) - c := tree.GetChildCount() - if c == 0 { - return s - } - res := "(" + s + " " - if c > 0 { - s = TreesStringTree(tree.GetChild(0), ruleNames, nil) - res += s - } - for i := 1; i < c; i++ { - s = TreesStringTree(tree.GetChild(i), ruleNames, nil) - res += (" " + s) - } - res += ")" - return res -} - -func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { - if recog != nil { - ruleNames = recog.GetRuleNames() - } - - if ruleNames != nil { - switch t2 := t.(type) { - case RuleNode: - t3 := t2.GetRuleContext() - altNumber := t3.GetAltNumber() - - if altNumber != ATNInvalidAltNumber { - return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber) - } - return ruleNames[t3.GetRuleIndex()] - case ErrorNode: - return fmt.Sprint(t2) - case TerminalNode: - if t2.GetSymbol() != nil { - return t2.GetSymbol().GetText() - } - } - } - - // no recog for rule names - payload := t.GetPayload() - if p2, ok := payload.(Token); ok { - return p2.GetText() - } - - return fmt.Sprint(t.GetPayload()) -} - -// Return ordered list of all children of this node -func TreesGetChildren(t Tree) []Tree { - list := make([]Tree, 0) - for i := 0; i < t.GetChildCount(); i++ { - list = append(list, t.GetChild(i)) - } - return list -} - -// Return a list of all ancestors of this node. 
The first node of -// -// list is the root and the last is the parent of this node. -func TreesgetAncestors(t Tree) []Tree { - ancestors := make([]Tree, 0) - t = t.GetParent() - for t != nil { - f := []Tree{t} - ancestors = append(f, ancestors...) - t = t.GetParent() - } - return ancestors -} - -func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { - return TreesfindAllNodes(t, ttype, true) -} - -func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { - return TreesfindAllNodes(t, ruleIndex, false) -} - -func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree { - nodes := make([]ParseTree, 0) - treesFindAllNodes(t, index, findTokens, &nodes) - return nodes -} - -func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) { - // check this node (the root) first - - t2, ok := t.(TerminalNode) - t3, ok2 := t.(ParserRuleContext) - - if findTokens && ok { - if t2.GetSymbol().GetTokenType() == index { - *nodes = append(*nodes, t2) - } - } else if !findTokens && ok2 { - if t3.GetRuleIndex() == index { - *nodes = append(*nodes, t3) - } - } - // check children - for i := 0; i < t.GetChildCount(); i++ { - treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes) - } -} - -func TreesDescendants(t ParseTree) []ParseTree { - nodes := []ParseTree{t} - for i := 0; i < t.GetChildCount(); i++ { - nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...) - } - return nodes -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "fmt" + +/** A set of utility routines useful for all kinds of ANTLR trees. */ + +// Print out a whole tree in LISP form. {@link //getNodeText} is used on the +// +// node payloads to get the text for the nodes. Detect +// parse trees and extract data appropriately. +func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { + + if recog != nil { + ruleNames = recog.GetRuleNames() + } + + s := TreesGetNodeText(tree, ruleNames, nil) + + s = EscapeWhitespace(s, false) + c := tree.GetChildCount() + if c == 0 { + return s + } + res := "(" + s + " " + if c > 0 { + s = TreesStringTree(tree.GetChild(0), ruleNames, nil) + res += s + } + for i := 1; i < c; i++ { + s = TreesStringTree(tree.GetChild(i), ruleNames, nil) + res += (" " + s) + } + res += ")" + return res +} + +func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { + if recog != nil { + ruleNames = recog.GetRuleNames() + } + + if ruleNames != nil { + switch t2 := t.(type) { + case RuleNode: + t3 := t2.GetRuleContext() + altNumber := t3.GetAltNumber() + + if altNumber != ATNInvalidAltNumber { + return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber) + } + return ruleNames[t3.GetRuleIndex()] + case ErrorNode: + return fmt.Sprint(t2) + case TerminalNode: + if t2.GetSymbol() != nil { + return t2.GetSymbol().GetText() + } + } + } + + // no recog for rule names + payload := t.GetPayload() + if p2, ok := payload.(Token); ok { + return p2.GetText() + } + + return fmt.Sprint(t.GetPayload()) +} + +// Return ordered list of all children of this node +func TreesGetChildren(t Tree) []Tree { + list := make([]Tree, 0) + for i := 0; i < t.GetChildCount(); i++ { + list = append(list, t.GetChild(i)) + } + return list +} + +// Return a list of all ancestors of this node. 
The first node of +// +// list is the root and the last is the parent of this node. +func TreesgetAncestors(t Tree) []Tree { + ancestors := make([]Tree, 0) + t = t.GetParent() + for t != nil { + f := []Tree{t} + ancestors = append(f, ancestors...) + t = t.GetParent() + } + return ancestors +} + +func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { + return TreesfindAllNodes(t, ttype, true) +} + +func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { + return TreesfindAllNodes(t, ruleIndex, false) +} + +func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree { + nodes := make([]ParseTree, 0) + treesFindAllNodes(t, index, findTokens, &nodes) + return nodes +} + +func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) { + // check this node (the root) first + + t2, ok := t.(TerminalNode) + t3, ok2 := t.(ParserRuleContext) + + if findTokens && ok { + if t2.GetSymbol().GetTokenType() == index { + *nodes = append(*nodes, t2) + } + } else if !findTokens && ok2 { + if t3.GetRuleIndex() == index { + *nodes = append(*nodes, t3) + } + } + // check children + for i := 0; i < t.GetChildCount(); i++ { + treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes) + } +} + +func TreesDescendants(t ParseTree) []ParseTree { + nodes := []ParseTree{t} + for i := 0; i < t.GetChildCount(); i++ { + nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...) + } + return nodes +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go index 9fad5d916b..2220fcd24f 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils.go @@ -1,352 +1,352 @@ -// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. -// Use of this file is governed by the BSD 3-clause license that -// can be found in the LICENSE.txt file in the project root. 
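Of the `trees.go` helpers reproduced above, `TreesStringTree` is the one application code typically reaches for. A hypothetical usage sketch, again assuming the vendored import path; `dumpTree` is an illustrative name:

```go
package main

import (
	"fmt"

	antlr "github.com/antlr/antlr4/runtime/Go/antlr/v4"
)

// dumpTree prints a parse tree in LISP form, e.g. (expr (term 42)).
// A non-nil recognizer lets TreesStringTree resolve rule names itself;
// alternatively an explicit ruleNames slice can be passed instead of nil.
func dumpTree(tree antlr.Tree, recog antlr.Recognizer) {
	fmt.Println(antlr.TreesStringTree(tree, nil, recog))
}

func main() {
	// tree and recognizer would come from an ANTLR-generated parser,
	// e.g. dumpTree(parser.Prog(), parser) (both names hypothetical).
}
```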
- -package antlr - -import ( - "bytes" - "errors" - "fmt" - "math/bits" - "strconv" - "strings" -) - -func intMin(a, b int) int { - if a < b { - return a - } - return b -} - -func intMax(a, b int) int { - if a > b { - return a - } - return b -} - -// A simple integer stack - -type IntStack []int - -var ErrEmptyStack = errors.New("Stack is empty") - -func (s *IntStack) Pop() (int, error) { - l := len(*s) - 1 - if l < 0 { - return 0, ErrEmptyStack - } - v := (*s)[l] - *s = (*s)[0:l] - return v, nil -} - -func (s *IntStack) Push(e int) { - *s = append(*s, e) -} - -type comparable interface { - Equals(other Collectable[any]) bool -} - -func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool { - - return a.Equals(b) -} - -func standardHashFunction(a interface{}) int { - if h, ok := a.(hasher); ok { - return h.Hash() - } - - panic("Not Hasher") -} - -type hasher interface { - Hash() int -} - -const bitsPerWord = 64 - -func indexForBit(bit int) int { - return bit / bitsPerWord -} - -func wordForBit(data []uint64, bit int) uint64 { - idx := indexForBit(bit) - if idx >= len(data) { - return 0 - } - return data[idx] -} - -func maskForBit(bit int) uint64 { - return uint64(1) << (bit % bitsPerWord) -} - -func wordsNeeded(bit int) int { - return indexForBit(bit) + 1 -} - -type BitSet struct { - data []uint64 -} - -func NewBitSet() *BitSet { - return &BitSet{} -} - -func (b *BitSet) add(value int) { - idx := indexForBit(value) - if idx >= len(b.data) { - size := wordsNeeded(value) - data := make([]uint64, size) - copy(data, b.data) - b.data = data - } - b.data[idx] |= maskForBit(value) -} - -func (b *BitSet) clear(index int) { - idx := indexForBit(index) - if idx >= len(b.data) { - return - } - b.data[idx] &= ^maskForBit(index) -} - -func (b *BitSet) or(set *BitSet) { - // Get min size necessary to represent the bits in both sets. - bLen := b.minLen() - setLen := set.minLen() - maxLen := intMax(bLen, setLen) - if maxLen > len(b.data) { - // Increase the size of len(b.data) to repesent the bits in both sets. - data := make([]uint64, maxLen) - copy(data, b.data) - b.data = data - } - // len(b.data) is at least setLen. - for i := 0; i < setLen; i++ { - b.data[i] |= set.data[i] - } -} - -func (b *BitSet) remove(value int) { - b.clear(value) -} - -func (b *BitSet) contains(value int) bool { - idx := indexForBit(value) - if idx >= len(b.data) { - return false - } - return (b.data[idx] & maskForBit(value)) != 0 -} - -func (b *BitSet) minValue() int { - for i, v := range b.data { - if v == 0 { - continue - } - return i*bitsPerWord + bits.TrailingZeros64(v) - } - return 2147483647 -} - -func (b *BitSet) equals(other interface{}) bool { - otherBitSet, ok := other.(*BitSet) - if !ok { - return false - } - - if b == otherBitSet { - return true - } - - // We only compare set bits, so we cannot rely on the two slices having the same size. Its - // possible for two BitSets to have different slice lengths but the same set bits. So we only - // compare the relevant words and ignore the trailing zeros. 
- bLen := b.minLen() - otherLen := otherBitSet.minLen() - - if bLen != otherLen { - return false - } - - for i := 0; i < bLen; i++ { - if b.data[i] != otherBitSet.data[i] { - return false - } - } - - return true -} - -func (b *BitSet) minLen() int { - for i := len(b.data); i > 0; i-- { - if b.data[i-1] != 0 { - return i - } - } - return 0 -} - -func (b *BitSet) length() int { - cnt := 0 - for _, val := range b.data { - cnt += bits.OnesCount64(val) - } - return cnt -} - -func (b *BitSet) String() string { - vals := make([]string, 0, b.length()) - - for i, v := range b.data { - for v != 0 { - n := bits.TrailingZeros64(v) - vals = append(vals, strconv.Itoa(i*bitsPerWord+n)) - v &= ^(uint64(1) << n) - } - } - - return "{" + strings.Join(vals, ", ") + "}" -} - -type AltDict struct { - data map[string]interface{} -} - -func NewAltDict() *AltDict { - d := new(AltDict) - d.data = make(map[string]interface{}) - return d -} - -func (a *AltDict) Get(key string) interface{} { - key = "k-" + key - return a.data[key] -} - -func (a *AltDict) put(key string, value interface{}) { - key = "k-" + key - a.data[key] = value -} - -func (a *AltDict) values() []interface{} { - vs := make([]interface{}, len(a.data)) - i := 0 - for _, v := range a.data { - vs[i] = v - i++ - } - return vs -} - -type DoubleDict struct { - data map[int]map[int]interface{} -} - -func NewDoubleDict() *DoubleDict { - dd := new(DoubleDict) - dd.data = make(map[int]map[int]interface{}) - return dd -} - -func (d *DoubleDict) Get(a, b int) interface{} { - data := d.data[a] - - if data == nil { - return nil - } - - return data[b] -} - -func (d *DoubleDict) set(a, b int, o interface{}) { - data := d.data[a] - - if data == nil { - data = make(map[int]interface{}) - d.data[a] = data - } - - data[b] = o -} - -func EscapeWhitespace(s string, escapeSpaces bool) string { - - s = strings.Replace(s, "\t", "\\t", -1) - s = strings.Replace(s, "\n", "\\n", -1) - s = strings.Replace(s, "\r", "\\r", -1) - if escapeSpaces { - s = strings.Replace(s, " ", "\u00B7", -1) - } - return s -} - -func TerminalNodeToStringArray(sa []TerminalNode) []string { - st := make([]string, len(sa)) - - for i, s := range sa { - st[i] = fmt.Sprintf("%v", s) - } - - return st -} - -func PrintArrayJavaStyle(sa []string) string { - var buffer bytes.Buffer - - buffer.WriteString("[") - - for i, s := range sa { - buffer.WriteString(s) - if i != len(sa)-1 { - buffer.WriteString(", ") - } - } - - buffer.WriteString("]") - - return buffer.String() -} - -// murmur hash -func murmurInit(seed int) int { - return seed -} - -func murmurUpdate(h int, value int) int { - const c1 uint32 = 0xCC9E2D51 - const c2 uint32 = 0x1B873593 - const r1 uint32 = 15 - const r2 uint32 = 13 - const m uint32 = 5 - const n uint32 = 0xE6546B64 - - k := uint32(value) - k *= c1 - k = (k << r1) | (k >> (32 - r1)) - k *= c2 - - hash := uint32(h) ^ k - hash = (hash << r2) | (hash >> (32 - r2)) - hash = hash*m + n - return int(hash) -} - -func murmurFinish(h int, numberOfWords int) int { - var hash = uint32(h) - hash ^= uint32(numberOfWords) << 2 - hash ^= hash >> 16 - hash *= 0x85ebca6b - hash ^= hash >> 13 - hash *= 0xc2b2ae35 - hash ^= hash >> 16 - - return int(hash) -} +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
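`utils.go` above bundles several unexported helpers, most notably a `BitSet` backed by a slice of 64-bit words and the murmur-hash trio used elsewhere in the runtime. Since everything is unexported, any demonstration has to live in-package; a hypothetical test-file sketch, not part of the vendored sources:

```go
package antlr

import "testing"

// Hypothetical in-package sketch exercising the unexported helpers above.
func TestUtilsSketch(t *testing.T) {
	// BitSet grows its word slice on demand: bit 70 needs a second uint64.
	b := NewBitSet()
	b.add(3)
	b.add(70)
	if !b.contains(70) || b.length() != 2 || b.minValue() != 3 {
		t.Fatalf("unexpected bitset state: %v", b)
	}

	// The murmur helpers fold a sequence of ints into one hash: seed with
	// murmurInit, mix each value with murmurUpdate, finish with the count.
	h := murmurInit(0)
	for _, v := range []int{1, 2, 3} {
		h = murmurUpdate(h, v)
	}
	h = murmurFinish(h, 3)
	_ = h // the concrete value is an implementation detail
}
```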
+ +package antlr + +import ( + "bytes" + "errors" + "fmt" + "math/bits" + "strconv" + "strings" +) + +func intMin(a, b int) int { + if a < b { + return a + } + return b +} + +func intMax(a, b int) int { + if a > b { + return a + } + return b +} + +// A simple integer stack + +type IntStack []int + +var ErrEmptyStack = errors.New("Stack is empty") + +func (s *IntStack) Pop() (int, error) { + l := len(*s) - 1 + if l < 0 { + return 0, ErrEmptyStack + } + v := (*s)[l] + *s = (*s)[0:l] + return v, nil +} + +func (s *IntStack) Push(e int) { + *s = append(*s, e) +} + +type comparable interface { + Equals(other Collectable[any]) bool +} + +func standardEqualsFunction(a Collectable[any], b Collectable[any]) bool { + + return a.Equals(b) +} + +func standardHashFunction(a interface{}) int { + if h, ok := a.(hasher); ok { + return h.Hash() + } + + panic("Not Hasher") +} + +type hasher interface { + Hash() int +} + +const bitsPerWord = 64 + +func indexForBit(bit int) int { + return bit / bitsPerWord +} + +func wordForBit(data []uint64, bit int) uint64 { + idx := indexForBit(bit) + if idx >= len(data) { + return 0 + } + return data[idx] +} + +func maskForBit(bit int) uint64 { + return uint64(1) << (bit % bitsPerWord) +} + +func wordsNeeded(bit int) int { + return indexForBit(bit) + 1 +} + +type BitSet struct { + data []uint64 +} + +func NewBitSet() *BitSet { + return &BitSet{} +} + +func (b *BitSet) add(value int) { + idx := indexForBit(value) + if idx >= len(b.data) { + size := wordsNeeded(value) + data := make([]uint64, size) + copy(data, b.data) + b.data = data + } + b.data[idx] |= maskForBit(value) +} + +func (b *BitSet) clear(index int) { + idx := indexForBit(index) + if idx >= len(b.data) { + return + } + b.data[idx] &= ^maskForBit(index) +} + +func (b *BitSet) or(set *BitSet) { + // Get min size necessary to represent the bits in both sets. + bLen := b.minLen() + setLen := set.minLen() + maxLen := intMax(bLen, setLen) + if maxLen > len(b.data) { + // Increase the size of len(b.data) to repesent the bits in both sets. + data := make([]uint64, maxLen) + copy(data, b.data) + b.data = data + } + // len(b.data) is at least setLen. + for i := 0; i < setLen; i++ { + b.data[i] |= set.data[i] + } +} + +func (b *BitSet) remove(value int) { + b.clear(value) +} + +func (b *BitSet) contains(value int) bool { + idx := indexForBit(value) + if idx >= len(b.data) { + return false + } + return (b.data[idx] & maskForBit(value)) != 0 +} + +func (b *BitSet) minValue() int { + for i, v := range b.data { + if v == 0 { + continue + } + return i*bitsPerWord + bits.TrailingZeros64(v) + } + return 2147483647 +} + +func (b *BitSet) equals(other interface{}) bool { + otherBitSet, ok := other.(*BitSet) + if !ok { + return false + } + + if b == otherBitSet { + return true + } + + // We only compare set bits, so we cannot rely on the two slices having the same size. Its + // possible for two BitSets to have different slice lengths but the same set bits. So we only + // compare the relevant words and ignore the trailing zeros. 
+ bLen := b.minLen() + otherLen := otherBitSet.minLen() + + if bLen != otherLen { + return false + } + + for i := 0; i < bLen; i++ { + if b.data[i] != otherBitSet.data[i] { + return false + } + } + + return true +} + +func (b *BitSet) minLen() int { + for i := len(b.data); i > 0; i-- { + if b.data[i-1] != 0 { + return i + } + } + return 0 +} + +func (b *BitSet) length() int { + cnt := 0 + for _, val := range b.data { + cnt += bits.OnesCount64(val) + } + return cnt +} + +func (b *BitSet) String() string { + vals := make([]string, 0, b.length()) + + for i, v := range b.data { + for v != 0 { + n := bits.TrailingZeros64(v) + vals = append(vals, strconv.Itoa(i*bitsPerWord+n)) + v &= ^(uint64(1) << n) + } + } + + return "{" + strings.Join(vals, ", ") + "}" +} + +type AltDict struct { + data map[string]interface{} +} + +func NewAltDict() *AltDict { + d := new(AltDict) + d.data = make(map[string]interface{}) + return d +} + +func (a *AltDict) Get(key string) interface{} { + key = "k-" + key + return a.data[key] +} + +func (a *AltDict) put(key string, value interface{}) { + key = "k-" + key + a.data[key] = value +} + +func (a *AltDict) values() []interface{} { + vs := make([]interface{}, len(a.data)) + i := 0 + for _, v := range a.data { + vs[i] = v + i++ + } + return vs +} + +type DoubleDict struct { + data map[int]map[int]interface{} +} + +func NewDoubleDict() *DoubleDict { + dd := new(DoubleDict) + dd.data = make(map[int]map[int]interface{}) + return dd +} + +func (d *DoubleDict) Get(a, b int) interface{} { + data := d.data[a] + + if data == nil { + return nil + } + + return data[b] +} + +func (d *DoubleDict) set(a, b int, o interface{}) { + data := d.data[a] + + if data == nil { + data = make(map[int]interface{}) + d.data[a] = data + } + + data[b] = o +} + +func EscapeWhitespace(s string, escapeSpaces bool) string { + + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + if escapeSpaces { + s = strings.Replace(s, " ", "\u00B7", -1) + } + return s +} + +func TerminalNodeToStringArray(sa []TerminalNode) []string { + st := make([]string, len(sa)) + + for i, s := range sa { + st[i] = fmt.Sprintf("%v", s) + } + + return st +} + +func PrintArrayJavaStyle(sa []string) string { + var buffer bytes.Buffer + + buffer.WriteString("[") + + for i, s := range sa { + buffer.WriteString(s) + if i != len(sa)-1 { + buffer.WriteString(", ") + } + } + + buffer.WriteString("]") + + return buffer.String() +} + +// murmur hash +func murmurInit(seed int) int { + return seed +} + +func murmurUpdate(h int, value int) int { + const c1 uint32 = 0xCC9E2D51 + const c2 uint32 = 0x1B873593 + const r1 uint32 = 15 + const r2 uint32 = 13 + const m uint32 = 5 + const n uint32 = 0xE6546B64 + + k := uint32(value) + k *= c1 + k = (k << r1) | (k >> (32 - r1)) + k *= c2 + + hash := uint32(h) ^ k + hash = (hash << r2) | (hash >> (32 - r2)) + hash = hash*m + n + return int(hash) +} + +func murmurFinish(h int, numberOfWords int) int { + var hash = uint32(h) + hash ^= uint32(numberOfWords) << 2 + hash ^= hash >> 16 + hash *= 0x85ebca6b + hash ^= hash >> 13 + hash *= 0xc2b2ae35 + hash ^= hash >> 16 + + return int(hash) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go index c9bd6751e3..855d6f1cb7 100644 --- a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/v4/utils_set.go @@ -1,235 +1,235 @@ -package 
antlr - -import "math" - -const ( - _initalCapacity = 16 - _initalBucketCapacity = 8 - _loadFactor = 0.75 -) - -type Set interface { - Add(value interface{}) (added interface{}) - Len() int - Get(value interface{}) (found interface{}) - Contains(value interface{}) bool - Values() []interface{} - Each(f func(interface{}) bool) -} - -type array2DHashSet struct { - buckets [][]Collectable[any] - hashcodeFunction func(interface{}) int - equalsFunction func(Collectable[any], Collectable[any]) bool - - n int // How many elements in set - threshold int // when to expand - - currentPrime int // jump by 4 primes each expand or whatever - initialBucketCapacity int -} - -func (as *array2DHashSet) Each(f func(interface{}) bool) { - if as.Len() < 1 { - return - } - - for _, bucket := range as.buckets { - for _, o := range bucket { - if o == nil { - break - } - if !f(o) { - return - } - } - } -} - -func (as *array2DHashSet) Values() []interface{} { - if as.Len() < 1 { - return nil - } - - values := make([]interface{}, 0, as.Len()) - as.Each(func(i interface{}) bool { - values = append(values, i) - return true - }) - return values -} - -func (as *array2DHashSet) Contains(value Collectable[any]) bool { - return as.Get(value) != nil -} - -func (as *array2DHashSet) Add(value Collectable[any]) interface{} { - if as.n > as.threshold { - as.expand() - } - return as.innerAdd(value) -} - -func (as *array2DHashSet) expand() { - old := as.buckets - - as.currentPrime += 4 - - var ( - newCapacity = len(as.buckets) << 1 - newTable = as.createBuckets(newCapacity) - newBucketLengths = make([]int, len(newTable)) - ) - - as.buckets = newTable - as.threshold = int(float64(newCapacity) * _loadFactor) - - for _, bucket := range old { - if bucket == nil { - continue - } - - for _, o := range bucket { - if o == nil { - break - } - - b := as.getBuckets(o) - bucketLength := newBucketLengths[b] - var newBucket []Collectable[any] - if bucketLength == 0 { - // new bucket - newBucket = as.createBucket(as.initialBucketCapacity) - newTable[b] = newBucket - } else { - newBucket = newTable[b] - if bucketLength == len(newBucket) { - // expand - newBucketCopy := make([]Collectable[any], len(newBucket)<<1) - copy(newBucketCopy[:bucketLength], newBucket) - newBucket = newBucketCopy - newTable[b] = newBucket - } - } - - newBucket[bucketLength] = o - newBucketLengths[b]++ - } - } -} - -func (as *array2DHashSet) Len() int { - return as.n -} - -func (as *array2DHashSet) Get(o Collectable[any]) interface{} { - if o == nil { - return nil - } - - b := as.getBuckets(o) - bucket := as.buckets[b] - if bucket == nil { // no bucket - return nil - } - - for _, e := range bucket { - if e == nil { - return nil // empty slot; not there - } - if as.equalsFunction(e, o) { - return e - } - } - - return nil -} - -func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} { - b := as.getBuckets(o) - - bucket := as.buckets[b] - - // new bucket - if bucket == nil { - bucket = as.createBucket(as.initialBucketCapacity) - bucket[0] = o - - as.buckets[b] = bucket - as.n++ - return o - } - - // look for it in bucket - for i := 0; i < len(bucket); i++ { - existing := bucket[i] - if existing == nil { // empty slot; not there, add. 
- bucket[i] = o - as.n++ - return o - } - - if as.equalsFunction(existing, o) { // found existing, quit - return existing - } - } - - // full bucket, expand and add to end - oldLength := len(bucket) - bucketCopy := make([]Collectable[any], oldLength<<1) - copy(bucketCopy[:oldLength], bucket) - bucket = bucketCopy - as.buckets[b] = bucket - bucket[oldLength] = o - as.n++ - return o -} - -func (as *array2DHashSet) getBuckets(value Collectable[any]) int { - hash := as.hashcodeFunction(value) - return hash & (len(as.buckets) - 1) -} - -func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] { - return make([][]Collectable[any], cap) -} - -func (as *array2DHashSet) createBucket(cap int) []Collectable[any] { - return make([]Collectable[any], cap) -} - -func newArray2DHashSetWithCap( - hashcodeFunction func(interface{}) int, - equalsFunction func(Collectable[any], Collectable[any]) bool, - initCap int, - initBucketCap int, -) *array2DHashSet { - if hashcodeFunction == nil { - hashcodeFunction = standardHashFunction - } - - if equalsFunction == nil { - equalsFunction = standardEqualsFunction - } - - ret := &array2DHashSet{ - hashcodeFunction: hashcodeFunction, - equalsFunction: equalsFunction, - - n: 0, - threshold: int(math.Floor(_initalCapacity * _loadFactor)), - - currentPrime: 1, - initialBucketCapacity: initBucketCap, - } - - ret.buckets = ret.createBuckets(initCap) - return ret -} - -func newArray2DHashSet( - hashcodeFunction func(interface{}) int, - equalsFunction func(Collectable[any], Collectable[any]) bool, -) *array2DHashSet { - return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity) -} +package antlr + +import "math" + +const ( + _initalCapacity = 16 + _initalBucketCapacity = 8 + _loadFactor = 0.75 +) + +type Set interface { + Add(value interface{}) (added interface{}) + Len() int + Get(value interface{}) (found interface{}) + Contains(value interface{}) bool + Values() []interface{} + Each(f func(interface{}) bool) +} + +type array2DHashSet struct { + buckets [][]Collectable[any] + hashcodeFunction func(interface{}) int + equalsFunction func(Collectable[any], Collectable[any]) bool + + n int // How many elements in set + threshold int // when to expand + + currentPrime int // jump by 4 primes each expand or whatever + initialBucketCapacity int +} + +func (as *array2DHashSet) Each(f func(interface{}) bool) { + if as.Len() < 1 { + return + } + + for _, bucket := range as.buckets { + for _, o := range bucket { + if o == nil { + break + } + if !f(o) { + return + } + } + } +} + +func (as *array2DHashSet) Values() []interface{} { + if as.Len() < 1 { + return nil + } + + values := make([]interface{}, 0, as.Len()) + as.Each(func(i interface{}) bool { + values = append(values, i) + return true + }) + return values +} + +func (as *array2DHashSet) Contains(value Collectable[any]) bool { + return as.Get(value) != nil +} + +func (as *array2DHashSet) Add(value Collectable[any]) interface{} { + if as.n > as.threshold { + as.expand() + } + return as.innerAdd(value) +} + +func (as *array2DHashSet) expand() { + old := as.buckets + + as.currentPrime += 4 + + var ( + newCapacity = len(as.buckets) << 1 + newTable = as.createBuckets(newCapacity) + newBucketLengths = make([]int, len(newTable)) + ) + + as.buckets = newTable + as.threshold = int(float64(newCapacity) * _loadFactor) + + for _, bucket := range old { + if bucket == nil { + continue + } + + for _, o := range bucket { + if o == nil { + break + } + + b := as.getBuckets(o) + bucketLength 
:= newBucketLengths[b] + var newBucket []Collectable[any] + if bucketLength == 0 { + // new bucket + newBucket = as.createBucket(as.initialBucketCapacity) + newTable[b] = newBucket + } else { + newBucket = newTable[b] + if bucketLength == len(newBucket) { + // expand + newBucketCopy := make([]Collectable[any], len(newBucket)<<1) + copy(newBucketCopy[:bucketLength], newBucket) + newBucket = newBucketCopy + newTable[b] = newBucket + } + } + + newBucket[bucketLength] = o + newBucketLengths[b]++ + } + } +} + +func (as *array2DHashSet) Len() int { + return as.n +} + +func (as *array2DHashSet) Get(o Collectable[any]) interface{} { + if o == nil { + return nil + } + + b := as.getBuckets(o) + bucket := as.buckets[b] + if bucket == nil { // no bucket + return nil + } + + for _, e := range bucket { + if e == nil { + return nil // empty slot; not there + } + if as.equalsFunction(e, o) { + return e + } + } + + return nil +} + +func (as *array2DHashSet) innerAdd(o Collectable[any]) interface{} { + b := as.getBuckets(o) + + bucket := as.buckets[b] + + // new bucket + if bucket == nil { + bucket = as.createBucket(as.initialBucketCapacity) + bucket[0] = o + + as.buckets[b] = bucket + as.n++ + return o + } + + // look for it in bucket + for i := 0; i < len(bucket); i++ { + existing := bucket[i] + if existing == nil { // empty slot; not there, add. + bucket[i] = o + as.n++ + return o + } + + if as.equalsFunction(existing, o) { // found existing, quit + return existing + } + } + + // full bucket, expand and add to end + oldLength := len(bucket) + bucketCopy := make([]Collectable[any], oldLength<<1) + copy(bucketCopy[:oldLength], bucket) + bucket = bucketCopy + as.buckets[b] = bucket + bucket[oldLength] = o + as.n++ + return o +} + +func (as *array2DHashSet) getBuckets(value Collectable[any]) int { + hash := as.hashcodeFunction(value) + return hash & (len(as.buckets) - 1) +} + +func (as *array2DHashSet) createBuckets(cap int) [][]Collectable[any] { + return make([][]Collectable[any], cap) +} + +func (as *array2DHashSet) createBucket(cap int) []Collectable[any] { + return make([]Collectable[any], cap) +} + +func newArray2DHashSetWithCap( + hashcodeFunction func(interface{}) int, + equalsFunction func(Collectable[any], Collectable[any]) bool, + initCap int, + initBucketCap int, +) *array2DHashSet { + if hashcodeFunction == nil { + hashcodeFunction = standardHashFunction + } + + if equalsFunction == nil { + equalsFunction = standardEqualsFunction + } + + ret := &array2DHashSet{ + hashcodeFunction: hashcodeFunction, + equalsFunction: equalsFunction, + + n: 0, + threshold: int(math.Floor(_initalCapacity * _loadFactor)), + + currentPrime: 1, + initialBucketCapacity: initBucketCap, + } + + ret.buckets = ret.createBuckets(initCap) + return ret +} + +func newArray2DHashSet( + hashcodeFunction func(interface{}) int, + equalsFunction func(Collectable[any], Collectable[any]) bool, +) *array2DHashSet { + return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity) +} diff --git a/vendor/github.com/go-chi/chi/.gitignore b/vendor/github.com/go-chi/chi/.gitignore index ba22c99a99..845690a204 100644 --- a/vendor/github.com/go-chi/chi/.gitignore +++ b/vendor/github.com/go-chi/chi/.gitignore @@ -1,3 +1,3 @@ -.idea -*.sw? -.vscode +.idea +*.sw? 
+.vscode diff --git a/vendor/github.com/go-chi/chi/CHANGELOG.md b/vendor/github.com/go-chi/chi/CHANGELOG.md index 7dd079157f..fefa4eb58a 100644 --- a/vendor/github.com/go-chi/chi/CHANGELOG.md +++ b/vendor/github.com/go-chi/chi/CHANGELOG.md @@ -1,269 +1,269 @@ -# Changelog - -## v1.5.4 (2021-02-27) - -- Undo prior retraction in v1.5.3 as we prepare for v5.0.0 release -- History of changes: see https://github.com/go-chi/chi/compare/v1.5.3...v1.5.4 - - -## v1.5.3 (2021-02-21) - -- Update go.mod to go 1.16 with new retract directive marking all versions without prior go.mod support -- History of changes: see https://github.com/go-chi/chi/compare/v1.5.2...v1.5.3 - - -## v1.5.2 (2021-02-10) - -- Reverting allocation optimization as a precaution as go test -race fails. -- Minor improvements, see history below -- History of changes: see https://github.com/go-chi/chi/compare/v1.5.1...v1.5.2 - - -## v1.5.1 (2020-12-06) - -- Performance improvement: removing 1 allocation by foregoing context.WithValue, thank you @bouk for - your contribution (https://github.com/go-chi/chi/pull/555). Note: new benchmarks posted in README. -- `middleware.CleanPath`: new middleware that clean's request path of double slashes -- deprecate & remove `chi.ServerBaseContext` in favour of stdlib `http.Server#BaseContext` -- plus other tiny improvements, see full commit history below -- History of changes: see https://github.com/go-chi/chi/compare/v4.1.2...v1.5.1 - - -## v1.5.0 (2020-11-12) - now with go.mod support - -`chi` dates back to 2016 with it's original implementation as one of the first routers to adopt the newly introduced -context.Context api to the stdlib -- set out to design a router that is faster, more modular and simpler than anything -else out there -- while not introducing any custom handler types or dependencies. Today, `chi` still has zero dependencies, -and in many ways is future proofed from changes, given it's minimal nature. Between versions, chi's iterations have been very -incremental, with the architecture and api being the same today as it was originally designed in 2016. For this reason it -makes chi a pretty easy project to maintain, as well thanks to the many amazing community contributions over the years -to who all help make chi better (total of 86 contributors to date -- thanks all!). - -Chi has been an labour of love, art and engineering, with the goals to offer beautiful ergonomics, flexibility, performance -and simplicity when building HTTP services with Go. I've strived to keep the router very minimal in surface area / code size, -and always improving the code wherever possible -- and as of today the `chi` package is just 1082 lines of code (not counting -middlewares, which are all optional). As well, I don't have the exact metrics, but from my analysis and email exchanges from -companies and developers, chi is used by thousands of projects around the world -- thank you all as there is no better form of -joy for me than to have art I had started be helpful and enjoyed by others. And of course I use chi in all of my own projects too :) - -For me, the asthetics of chi's code and usage are very important. With the introduction of Go's module support -(which I'm a big fan of), chi's past versioning scheme choice to v2, v3 and v4 would mean I'd require the import path -of "github.com/go-chi/chi/v4", leading to the lengthy discussion at https://github.com/go-chi/chi/issues/462. 
-Haha, to some, you may be scratching your head why I've spent > 1 year stalling to adopt "/vXX" convention in the import -path -- which isn't horrible in general -- but for chi, I'm unable to accept it as I strive for perfection in it's API design, -aesthetics and simplicity. It just doesn't feel good to me given chi's simple nature -- I do not foresee a "v5" or "v6", -and upgrading between versions in the future will also be just incremental. - -I do understand versioning is a part of the API design as well, which is why the solution for a while has been to "do nothing", -as Go supports both old and new import paths with/out go.mod. However, now that Go module support has had time to iron out kinks and -is adopted everywhere, it's time for chi to get with the times. Luckily, I've discovered a path forward that will make me happy, -while also not breaking anyone's app who adopted a prior versioning from tags in v2/v3/v4. I've made an experimental release of -v1.5.0 with go.mod silently, and tested it with new and old projects, to ensure the developer experience is preserved, and it's -largely unnoticed. Fortunately, Go's toolchain will check the tags of a repo and consider the "latest" tag the one with go.mod. -However, you can still request a specific older tag such as v4.1.2, and everything will "just work". But new users can just -`go get github.com/go-chi/chi` or `go get github.com/go-chi/chi@latest` and they will get the latest version which contains -go.mod support, which is v1.5.0+. `chi` will not change very much over the years, just like it hasn't changed much from 4 years ago. -Therefore, we will stay on v1.x from here on, starting from v1.5.0. Any breaking changes will bump a "minor" release and -backwards-compatible improvements/fixes will bump a "tiny" release. - -For existing projects who want to upgrade to the latest go.mod version, run: `go get -u github.com/go-chi/chi@v1.5.0`, -which will get you on the go.mod version line (as Go's mod cache may still remember v4.x). Brand new systems can run -`go get -u github.com/go-chi/chi` or `go get -u github.com/go-chi/chi@latest` to install chi, which will install v1.5.0+ -built with go.mod support. - -My apologies to the developers who will disagree with the decisions above, but, hope you'll try it and see it's a very -minor request which is backwards compatible and won't break your existing installations. - -Cheers all, happy coding! - - ---- - - -## v4.1.2 (2020-06-02) - -- fix that handles MethodNotAllowed with path variables, thank you @caseyhadden for your contribution -- fix to replace nested wildcards correctly in RoutePattern, thank you @@unmultimedio for your contribution -- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2 - - -## v4.1.1 (2020-04-16) - -- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp - route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix! -- new middleware.RouteHeaders as a simple router for request headers with wildcard support -- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1 - - -## v4.1.0 (2020-04-1) - -- middleware.LogEntry: Write method on interface now passes the response header - and an extra interface type useful for custom logger implementations. 
-- middleware.WrapResponseWriter: minor fix -- middleware.Recoverer: a bit prettier -- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0 - -## v4.0.4 (2020-03-24) - -- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496) -- a few minor improvements and fixes -- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4 - - -## v4.0.3 (2020-01-09) - -- core: fix regexp routing to include default value when param is not matched -- middleware: rewrite of middleware.Compress -- middleware: suppress http.ErrAbortHandler in middleware.Recoverer -- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3 - - -## v4.0.2 (2019-02-26) - -- Minor fixes -- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2 - - -## v4.0.1 (2019-01-21) - -- Fixes issue with compress middleware: #382 #385 -- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1 - - -## v4.0.0 (2019-01-10) - -- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8 -- router: respond with 404 on router with no routes (#362) -- router: additional check to ensure wildcard is at the end of a url pattern (#333) -- middleware: deprecate use of http.CloseNotifier (#347) -- middleware: fix RedirectSlashes to include query params on redirect (#334) -- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0 - - -## v3.3.4 (2019-01-07) - -- Minor middleware improvements. No changes to core library/router. Moving v3 into its -- own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11 -- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4 - - -## v3.3.3 (2018-08-27) - -- Minor release -- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3 - - -## v3.3.2 (2017-12-22) - -- Support to route trailing slashes on mounted sub-routers (#281) -- middleware: new `ContentCharset` to check matching charsets. Thank you - @csucu for your community contribution! - - -## v3.3.1 (2017-11-20) - -- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types -- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value -- Minor bug fixes - - -## v3.3.0 (2017-10-10) - -- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage -- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function - - -## v3.2.1 (2017-08-31) - -- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface - and `Mux`. 
Match searches the mux's routing tree for a handler that matches the method/path -- Add new `RouteMethod` to `*Context` -- Add new `Routes` pointer to `*Context` -- Add new `middleware.GetHead` to route missing HEAD requests to GET handler -- Updated benchmarks (see README) - - -## v3.1.5 (2017-08-02) - -- Setup golint and go vet for the project -- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler` - to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler` - - -## v3.1.0 (2017-07-10) - -- Fix a few minor issues after v3 release -- Move `docgen` sub-pkg to https://github.com/go-chi/docgen -- Move `render` sub-pkg to https://github.com/go-chi/render -- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime - suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in - https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage. - - -## v3.0.0 (2017-06-21) - -- Major update to chi library with many exciting updates, but also some *breaking changes* -- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as - `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the - same router -- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example: - `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")` -- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as - `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like - in `_examples/custom-handler` -- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their - own using file handler with the stdlib, see `_examples/fileserver` for an example -- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()` -- Moved the chi project to its own organization, to allow chi-related community packages to - be easily discovered and supported, at: https://github.com/go-chi -- *NOTE:* please update your import paths to `"github.com/go-chi/chi"` -- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2 - - -## v2.1.0 (2017-03-30) - -- Minor improvements and update to the chi core library -- Introduced a brand new `chi/render` sub-package to complete the story of building - APIs to offer a pattern for managing well-defined request / response payloads. Please - check out the updated `_examples/rest` example for how it works. -- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface - - -## v2.0.0 (2017-01-06) - -- After many months of v2 being in an RC state with many companies and users running it in - production, the inclusion of some improvements to the middlewares, we are very pleased to - announce v2.0.0 of chi. - - -## v2.0.0-rc1 (2016-07-26) - -- Huge update! chi v2 is a large refactor targetting Go 1.7+. As of Go 1.7, the popular - community `"net/context"` package has been included in the standard library as `"context"` and - utilized by `"net/http"` and `http.Request` to managing deadlines, cancelation signals and other - request-scoped values. We're very excited about the new context addition and are proud to - introduce chi v2, a minimal and powerful routing package for building large HTTP services, - with zero external dependencies. Chi focuses on idiomatic design and encourages the use of - stdlib HTTP handlers and middlwares. 
-- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc` -- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()` -- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`, - which provides direct access to URL routing parameters, the routing path and the matching - routing patterns. -- Users upgrading from chi v1 to v2, need to: - 1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to - the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)` - 2. Use `chi.URLParam(r *http.Request, paramKey string) string` - or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value - - -## v1.0.0 (2016-07-01) - -- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older. - - -## v0.9.0 (2016-03-31) - -- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33) -- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters - has changed to: `chi.URLParam(ctx, "id")` +# Changelog + +## v1.5.4 (2021-02-27) + +- Undo prior retraction in v1.5.3 as we prepare for v5.0.0 release +- History of changes: see https://github.com/go-chi/chi/compare/v1.5.3...v1.5.4 + + +## v1.5.3 (2021-02-21) + +- Update go.mod to go 1.16 with new retract directive marking all versions without prior go.mod support +- History of changes: see https://github.com/go-chi/chi/compare/v1.5.2...v1.5.3 + + +## v1.5.2 (2021-02-10) + +- Reverting allocation optimization as a precaution as go test -race fails. +- Minor improvements, see history below +- History of changes: see https://github.com/go-chi/chi/compare/v1.5.1...v1.5.2 + + +## v1.5.1 (2020-12-06) + +- Performance improvement: removing 1 allocation by foregoing context.WithValue, thank you @bouk for + your contribution (https://github.com/go-chi/chi/pull/555). Note: new benchmarks posted in README. +- `middleware.CleanPath`: new middleware that clean's request path of double slashes +- deprecate & remove `chi.ServerBaseContext` in favour of stdlib `http.Server#BaseContext` +- plus other tiny improvements, see full commit history below +- History of changes: see https://github.com/go-chi/chi/compare/v4.1.2...v1.5.1 + + +## v1.5.0 (2020-11-12) - now with go.mod support + +`chi` dates back to 2016 with it's original implementation as one of the first routers to adopt the newly introduced +context.Context api to the stdlib -- set out to design a router that is faster, more modular and simpler than anything +else out there -- while not introducing any custom handler types or dependencies. Today, `chi` still has zero dependencies, +and in many ways is future proofed from changes, given it's minimal nature. Between versions, chi's iterations have been very +incremental, with the architecture and api being the same today as it was originally designed in 2016. For this reason it +makes chi a pretty easy project to maintain, as well thanks to the many amazing community contributions over the years +to who all help make chi better (total of 86 contributors to date -- thanks all!). + +Chi has been an labour of love, art and engineering, with the goals to offer beautiful ergonomics, flexibility, performance +and simplicity when building HTTP services with Go. 
I've strived to keep the router very minimal in surface area / code size, +and always improving the code wherever possible -- and as of today the `chi` package is just 1082 lines of code (not counting +middlewares, which are all optional). As well, I don't have the exact metrics, but from my analysis and email exchanges from +companies and developers, chi is used by thousands of projects around the world -- thank you all as there is no better form of +joy for me than to have art I had started be helpful and enjoyed by others. And of course I use chi in all of my own projects too :) + +For me, the asthetics of chi's code and usage are very important. With the introduction of Go's module support +(which I'm a big fan of), chi's past versioning scheme choice to v2, v3 and v4 would mean I'd require the import path +of "github.com/go-chi/chi/v4", leading to the lengthy discussion at https://github.com/go-chi/chi/issues/462. +Haha, to some, you may be scratching your head why I've spent > 1 year stalling to adopt "/vXX" convention in the import +path -- which isn't horrible in general -- but for chi, I'm unable to accept it as I strive for perfection in it's API design, +aesthetics and simplicity. It just doesn't feel good to me given chi's simple nature -- I do not foresee a "v5" or "v6", +and upgrading between versions in the future will also be just incremental. + +I do understand versioning is a part of the API design as well, which is why the solution for a while has been to "do nothing", +as Go supports both old and new import paths with/out go.mod. However, now that Go module support has had time to iron out kinks and +is adopted everywhere, it's time for chi to get with the times. Luckily, I've discovered a path forward that will make me happy, +while also not breaking anyone's app who adopted a prior versioning from tags in v2/v3/v4. I've made an experimental release of +v1.5.0 with go.mod silently, and tested it with new and old projects, to ensure the developer experience is preserved, and it's +largely unnoticed. Fortunately, Go's toolchain will check the tags of a repo and consider the "latest" tag the one with go.mod. +However, you can still request a specific older tag such as v4.1.2, and everything will "just work". But new users can just +`go get github.com/go-chi/chi` or `go get github.com/go-chi/chi@latest` and they will get the latest version which contains +go.mod support, which is v1.5.0+. `chi` will not change very much over the years, just like it hasn't changed much from 4 years ago. +Therefore, we will stay on v1.x from here on, starting from v1.5.0. Any breaking changes will bump a "minor" release and +backwards-compatible improvements/fixes will bump a "tiny" release. + +For existing projects who want to upgrade to the latest go.mod version, run: `go get -u github.com/go-chi/chi@v1.5.0`, +which will get you on the go.mod version line (as Go's mod cache may still remember v4.x). Brand new systems can run +`go get -u github.com/go-chi/chi` or `go get -u github.com/go-chi/chi@latest` to install chi, which will install v1.5.0+ +built with go.mod support. + +My apologies to the developers who will disagree with the decisions above, but, hope you'll try it and see it's a very +minor request which is backwards compatible and won't break your existing installations. + +Cheers all, happy coding! 
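This vendored changelog is the only prose description of chi's API in the diff, so a minimal usage sketch may help orient readers; it assumes the unversioned v1.x import path described above, and the `/articles/{articleID}` route is illustrative:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/go-chi/chi"
)

func main() {
	r := chi.NewRouter()
	// {articleID} is captured by the router and stored on the request
	// context; chi.URLParam reads it back (see the v3.0.0 notes below).
	r.Get("/articles/{articleID}", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(w, "article %s\n", chi.URLParam(req, "articleID"))
	})
	log.Fatal(http.ListenAndServe(":8080", r))
}
```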
+ + +--- + + +## v4.1.2 (2020-06-02) + +- fix that handles MethodNotAllowed with path variables, thank you @caseyhadden for your contribution +- fix to replace nested wildcards correctly in RoutePattern, thank you @@unmultimedio for your contribution +- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2 + + +## v4.1.1 (2020-04-16) + +- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp + route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix! +- new middleware.RouteHeaders as a simple router for request headers with wildcard support +- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1 + + +## v4.1.0 (2020-04-1) + +- middleware.LogEntry: Write method on interface now passes the response header + and an extra interface type useful for custom logger implementations. +- middleware.WrapResponseWriter: minor fix +- middleware.Recoverer: a bit prettier +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0 + +## v4.0.4 (2020-03-24) + +- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496) +- a few minor improvements and fixes +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4 + + +## v4.0.3 (2020-01-09) + +- core: fix regexp routing to include default value when param is not matched +- middleware: rewrite of middleware.Compress +- middleware: suppress http.ErrAbortHandler in middleware.Recoverer +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3 + + +## v4.0.2 (2019-02-26) + +- Minor fixes +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2 + + +## v4.0.1 (2019-01-21) + +- Fixes issue with compress middleware: #382 #385 +- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1 + + +## v4.0.0 (2019-01-10) + +- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8 +- router: respond with 404 on router with no routes (#362) +- router: additional check to ensure wildcard is at the end of a url pattern (#333) +- middleware: deprecate use of http.CloseNotifier (#347) +- middleware: fix RedirectSlashes to include query params on redirect (#334) +- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0 + + +## v3.3.4 (2019-01-07) + +- Minor middleware improvements. No changes to core library/router. Moving v3 into its +- own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11 +- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4 + + +## v3.3.3 (2018-08-27) + +- Minor release +- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3 + + +## v3.3.2 (2017-12-22) + +- Support to route trailing slashes on mounted sub-routers (#281) +- middleware: new `ContentCharset` to check matching charsets. Thank you + @csucu for your community contribution! 
+ + +## v3.3.1 (2017-11-20) + +- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types +- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value +- Minor bug fixes + + +## v3.3.0 (2017-10-10) + +- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage +- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function + + +## v3.2.1 (2017-08-31) + +- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface + and `Mux`. Match searches the mux's routing tree for a handler that matches the method/path +- Add new `RouteMethod` to `*Context` +- Add new `Routes` pointer to `*Context` +- Add new `middleware.GetHead` to route missing HEAD requests to GET handler +- Updated benchmarks (see README) + + +## v3.1.5 (2017-08-02) + +- Setup golint and go vet for the project +- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler` + to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler` + + +## v3.1.0 (2017-07-10) + +- Fix a few minor issues after v3 release +- Move `docgen` sub-pkg to https://github.com/go-chi/docgen +- Move `render` sub-pkg to https://github.com/go-chi/render +- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime + suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in + https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage. + + +## v3.0.0 (2017-06-21) + +- Major update to chi library with many exciting updates, but also some *breaking changes* +- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as + `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the + same router +- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example: + `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")` +- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as + `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like + in `_examples/custom-handler` +- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their + own using file handler with the stdlib, see `_examples/fileserver` for an example +- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()` +- Moved the chi project to its own organization, to allow chi-related community packages to + be easily discovered and supported, at: https://github.com/go-chi +- *NOTE:* please update your import paths to `"github.com/go-chi/chi"` +- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2 + + +## v2.1.0 (2017-03-30) + +- Minor improvements and update to the chi core library +- Introduced a brand new `chi/render` sub-package to complete the story of building + APIs to offer a pattern for managing well-defined request / response payloads. Please + check out the updated `_examples/rest` example for how it works. 
+- Added `MethodNotAllowed(h http.HandlerFunc)` to the chi.Router interface + + +## v2.0.0 (2017-01-06) + +- After many months of v2 being in an RC state, with many companies and users running it in + production and with the inclusion of some improvements to the middlewares, we are very pleased to + announce v2.0.0 of chi. + + +## v2.0.0-rc1 (2016-07-26) + +- Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular + community `"net/context"` package has been included in the standard library as `"context"` and + utilized by `"net/http"` and `http.Request` to manage deadlines, cancelation signals and other + request-scoped values. We're very excited about the new context addition and are proud to + introduce chi v2, a minimal and powerful routing package for building large HTTP services, + with zero external dependencies. Chi focuses on idiomatic design and encourages the use of + stdlib HTTP handlers and middlewares. +- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc` +- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()` +- chi v2's lower-level routing context is accessible via `chi.RouteContext(r.Context()) *chi.Context`, + which provides direct access to URL routing parameters, the routing path and the matching + routing patterns. +- Users upgrading from chi v1 to v2 need to: + 1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)`, to + the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)` + 2. Use `chi.URLParam(r *http.Request, paramKey string) string` + or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value + + +## v1.0.0 (2016-07-01) + +- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older. + + +## v0.9.0 (2016-03-31) + +- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33) +- BREAKING NOTE: due to subtle API changes, the previous `chi.URLParams(ctx)["id"]` form used to access url parameters + has changed to: `chi.URLParam(ctx, "id")` diff --git a/vendor/github.com/go-chi/chi/CONTRIBUTING.md b/vendor/github.com/go-chi/chi/CONTRIBUTING.md index c0ac2dfe85..660fa60fa8 100644 --- a/vendor/github.com/go-chi/chi/CONTRIBUTING.md +++ b/vendor/github.com/go-chi/chi/CONTRIBUTING.md @@ -1,31 +1,31 @@ -# Contributing - -## Prerequisites - -1. [Install Go][go-install]. -2. Download the sources and switch the working directory: - - ```bash - go get -u -d github.com/go-chi/chi - cd $GOPATH/src/github.com/go-chi/chi - ``` - -## Submitting a Pull Request - -A typical workflow is: - -1. [Fork the repository.][fork] [This tip maybe also helpful.][go-fork-tip] -2. [Create a topic branch.][branch] -3. Add tests for your change. -4. Run `go test`. If your tests pass, return to the step 3. -5. Implement the change and ensure the steps from the previous step pass. -6. Run `goimports -w .`, to ensure the new code conforms to Go formatting guideline. -7. [Add, commit and push your changes.][git-help] -8.
[Submit a pull request.][pull-req] + -[go-install]: https://golang.org/doc/install -[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html -[fork]: https://help.github.com/articles/fork-a-repo -[branch]: http://learn.github.com/p/branching.html -[git-help]: https://guides.github.com -[pull-req]: https://help.github.com/articles/using-pull-requests +# Contributing + +## Prerequisites + +1. [Install Go][go-install]. +2. Download the sources and switch the working directory: + + ```bash + go get -u -d github.com/go-chi/chi + cd $GOPATH/src/github.com/go-chi/chi + ``` + +## Submitting a Pull Request + +A typical workflow is: + +1. [Fork the repository.][fork] [This tip may also be helpful.][go-fork-tip] +2. [Create a topic branch.][branch] +3. Add tests for your change. +4. Run `go test`. If your new tests pass, return to step 3. +5. Implement the change and ensure the tests from step 4 now pass. +6. Run `goimports -w .` to ensure the new code conforms to Go formatting guidelines. +7. [Add, commit and push your changes.][git-help] +8. [Submit a pull request.][pull-req] + +[go-install]: https://golang.org/doc/install +[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html +[fork]: https://help.github.com/articles/fork-a-repo +[branch]: http://learn.github.com/p/branching.html +[git-help]: https://guides.github.com +[pull-req]: https://help.github.com/articles/using-pull-requests diff --git a/vendor/github.com/go-chi/chi/LICENSE b/vendor/github.com/go-chi/chi/LICENSE index d99f02ffac..2a21df8af5 100644 --- a/vendor/github.com/go-chi/chi/LICENSE +++ b/vendor/github.com/go-chi/chi/LICENSE @@ -1,20 +1,20 @@ -Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc. - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/go-chi/chi/Makefile b/vendor/github.com/go-chi/chi/Makefile index b96c92dd21..1813cbf608 100644 --- a/vendor/github.com/go-chi/chi/Makefile +++ b/vendor/github.com/go-chi/chi/Makefile @@ -1,14 +1,14 @@ -all: - @echo "**********************************************************" - @echo "** chi build tool **" - @echo "**********************************************************" - - -test: - go clean -testcache && $(MAKE) test-router && $(MAKE) test-middleware - -test-router: - go test -race -v . - -test-middleware: - go test -race -v ./middleware +all: + @echo "**********************************************************" + @echo "** chi build tool **" + @echo "**********************************************************" + + +test: + go clean -testcache && $(MAKE) test-router && $(MAKE) test-middleware + +test-router: + go test -race -v . + +test-middleware: + go test -race -v ./middleware diff --git a/vendor/github.com/go-chi/chi/README.md b/vendor/github.com/go-chi/chi/README.md index 1b96d360d7..84422d4e4d 100644 --- a/vendor/github.com/go-chi/chi/README.md +++ b/vendor/github.com/go-chi/chi/README.md @@ -1,511 +1,511 @@ -# chi - - -[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis] - -`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's -especially good at helping you write large REST API services that are kept maintainable as your -project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to -handle signaling, cancelation and request-scoped values across a handler chain. - -The focus of the project has been to seek out an elegant and comfortable design for writing -REST API servers, written during the development of the Pressly API service that powers our -public API service, which in turn powers all of our client-side applications. - -The key considerations of chi's design are: project structure, maintainability, standard http -handlers (stdlib-only), developer productivity, and deconstructing a large system into many small -parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also -included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) -and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too! 
- -## Install - -`go get -u github.com/go-chi/chi` - - -## Features - -* **Lightweight** - cloc'd in ~1000 LOC for the chi router -* **Fast** - yes, see [benchmarks](#benchmarks) -* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http` -* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and sub-router mounting -* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts -* **Robust** - in production at Pressly, CloudFlare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91)) -* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown -* **Go.mod support** - v1.x of chi (starting from v1.5.0), now has go.mod support (see [CHANGELOG](https://github.com/go-chi/chi/blob/master/CHANGELOG.md#v150-2020-11-12---now-with-gomod-support)) -* **No external dependencies** - plain ol' Go stdlib + net/http - - -## Examples - -See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples. - - -**As easy as:** - -```go -package main - -import ( - "net/http" - - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" -) - -func main() { - r := chi.NewRouter() - r.Use(middleware.Logger) - r.Get("/", func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("welcome")) - }) - http.ListenAndServe(":3000", r) -} -``` - -**REST Preview:** - -Here is a little preview of how routing looks like with chi. Also take a look at the generated routing docs -in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in -Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)). - -I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed -above, they will show you all the features of chi and serve as a good form of documentation. - -```go -import ( - //... - "context" - "github.com/go-chi/chi" - "github.com/go-chi/chi/middleware" -) - -func main() { - r := chi.NewRouter() - - // A good base middleware stack - r.Use(middleware.RequestID) - r.Use(middleware.RealIP) - r.Use(middleware.Logger) - r.Use(middleware.Recoverer) - - // Set a timeout value on the request context (ctx), that will signal - // through ctx.Done() that the request has timed out and further - // processing should be stopped. 
- r.Use(middleware.Timeout(60 * time.Second)) - - r.Get("/", func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("hi")) - }) - - // RESTy routes for "articles" resource - r.Route("/articles", func(r chi.Router) { - r.With(paginate).Get("/", listArticles) // GET /articles - r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017 - - r.Post("/", createArticle) // POST /articles - r.Get("/search", searchArticles) // GET /articles/search - - // Regexp url parameters: - r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto - - // Subrouters: - r.Route("/{articleID}", func(r chi.Router) { - r.Use(ArticleCtx) - r.Get("/", getArticle) // GET /articles/123 - r.Put("/", updateArticle) // PUT /articles/123 - r.Delete("/", deleteArticle) // DELETE /articles/123 - }) - }) - - // Mount the admin sub-router - r.Mount("/admin", adminRouter()) - - http.ListenAndServe(":3333", r) -} - -func ArticleCtx(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - articleID := chi.URLParam(r, "articleID") - article, err := dbGetArticle(articleID) - if err != nil { - http.Error(w, http.StatusText(404), 404) - return - } - ctx := context.WithValue(r.Context(), "article", article) - next.ServeHTTP(w, r.WithContext(ctx)) - }) -} - -func getArticle(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - article, ok := ctx.Value("article").(*Article) - if !ok { - http.Error(w, http.StatusText(422), 422) - return - } - w.Write([]byte(fmt.Sprintf("title:%s", article.Title))) -} - -// A completely separate router for administrator routes -func adminRouter() http.Handler { - r := chi.NewRouter() - r.Use(AdminOnly) - r.Get("/", adminIndex) - r.Get("/accounts", adminListAccounts) - return r -} - -func AdminOnly(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() - perm, ok := ctx.Value("acl.permission").(YourPermissionType) - if !ok || !perm.IsAdmin() { - http.Error(w, http.StatusText(403), 403) - return - } - next.ServeHTTP(w, r) - }) -} -``` - - -## Router interface - -chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree). -The router is fully compatible with `net/http`. - -Built on top of the tree is the `Router` interface: - -```go -// Router consisting of the core routing methods used by chi's Mux, -// using only the standard net/http. -type Router interface { - http.Handler - Routes - - // Use appends one or more middlewares onto the Router stack. - Use(middlewares ...func(http.Handler) http.Handler) - - // With adds inline middlewares for an endpoint handler. - With(middlewares ...func(http.Handler) http.Handler) Router - - // Group adds a new inline-Router along the current routing - // path, with a fresh middleware stack for the inline-Router. - Group(fn func(r Router)) Router - - // Route mounts a sub-Router along a `pattern`` string. - Route(pattern string, fn func(r Router)) Router - - // Mount attaches another http.Handler along ./pattern/* - Mount(pattern string, h http.Handler) - - // Handle and HandleFunc adds routes for `pattern` that matches - // all HTTP methods. - Handle(pattern string, h http.Handler) - HandleFunc(pattern string, h http.HandlerFunc) - - // Method and MethodFunc adds routes for `pattern` that matches - // the `method` HTTP method. 
- Method(method, pattern string, h http.Handler) - MethodFunc(method, pattern string, h http.HandlerFunc) - - // HTTP-method routing along `pattern` - Connect(pattern string, h http.HandlerFunc) - Delete(pattern string, h http.HandlerFunc) - Get(pattern string, h http.HandlerFunc) - Head(pattern string, h http.HandlerFunc) - Options(pattern string, h http.HandlerFunc) - Patch(pattern string, h http.HandlerFunc) - Post(pattern string, h http.HandlerFunc) - Put(pattern string, h http.HandlerFunc) - Trace(pattern string, h http.HandlerFunc) - - // NotFound defines a handler to respond whenever a route could - // not be found. - NotFound(h http.HandlerFunc) - - // MethodNotAllowed defines a handler to respond whenever a method is - // not allowed. - MethodNotAllowed(h http.HandlerFunc) -} - -// Routes interface adds two methods for router traversal, which is also -// used by the github.com/go-chi/docgen package to generate documentation for Routers. -type Routes interface { - // Routes returns the routing tree in an easily traversable structure. - Routes() []Route - - // Middlewares returns the list of middlewares in use by the router. - Middlewares() Middlewares - - // Match searches the routing tree for a handler that matches - // the method/path - similar to routing a http request, but without - // executing the handler thereafter. - Match(rctx *Context, method, path string) bool -} -``` - -Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern -supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters -can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters -and `chi.URLParam(r, "*")` for a wildcard parameter. - - -### Middleware handlers - -chi's middlewares are just stdlib net/http middleware handlers. There is nothing special -about them, which means the router and all the tooling is designed to be compatible and -friendly with any middleware in the community. This offers much better extensibility and reuse -of packages and is at the heart of chi's purpose. - -Here is an example of a standard net/http middleware where we assign a context key `"user"` -the value of `"123"`. This middleware sets a hypothetical user identifier on the request -context and calls the next handler in the chain. - -```go -// HTTP middleware setting a value on the request context -func MyMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // create new context from `r` request context, and assign key `"user"` - // to value of `"123"` - ctx := context.WithValue(r.Context(), "user", "123") - - // call the next handler in the chain, passing the response writer and - // the updated request object with the new context value. - // - // note: context.Context values are nested, so any previously set - // values will be accessible as well, and the new `"user"` key - // will be accessible from this point forward. - next.ServeHTTP(w, r.WithContext(ctx)) - }) -} -``` - - -### Request handlers - -chi uses standard net/http request handlers. This little snippet is an example of a http.Handler -func that reads a user identifier from the request context - hypothetically, identifying -the user sending an authenticated request, validated+set by a previous middleware handler. - -```go -// HTTP handler accessing data from the request context. 
-func MyRequestHandler(w http.ResponseWriter, r *http.Request) { - // here we read from the request context and fetch out `"user"` key set in - // the MyMiddleware example above. - user := r.Context().Value("user").(string) - - // respond to the client - w.Write([]byte(fmt.Sprintf("hi %s", user))) -} -``` - - -### URL parameters - -chi's router parses and stores URL parameters right onto the request context. Here is -an example of how to access URL params in your net/http handlers. And of course, middlewares -are able to access the same information. - -```go -// HTTP handler accessing the url routing parameters. -func MyRequestHandler(w http.ResponseWriter, r *http.Request) { - // fetch the url parameter `"userID"` from the request of a matching - // routing pattern. An example routing pattern could be: /users/{userID} - userID := chi.URLParam(r, "userID") - - // fetch `"key"` from the request context - ctx := r.Context() - key := ctx.Value("key").(string) - - // respond to the client - w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key))) -} -``` - - -## Middlewares - -chi comes equipped with an optional `middleware` package, providing a suite of standard -`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible -with `net/http` can be used with chi's mux. - -### Core middlewares - ----------------------------------------------------------------------------------------------------- -| chi/middleware Handler | description | -| :--------------------- | :---------------------------------------------------------------------- | -| [AllowContentEncoding] | Enforces a whitelist of request Content-Encoding headers | -| [AllowContentType] | Explicit whitelist of accepted request Content-Types | -| [BasicAuth] | Basic HTTP authentication | -| [Compress] | Gzip compression for clients that accept compressed responses | -| [ContentCharset] | Ensure charset for Content-Type request headers | -| [CleanPath] | Clean double slashes from request path | -| [GetHead] | Automatically route undefined HEAD requests to GET handlers | -| [Heartbeat] | Monitoring endpoint to check the servers pulse | -| [Logger] | Logs the start and end of each request with the elapsed processing time | -| [NoCache] | Sets response headers to prevent clients from caching | -| [Profiler] | Easily attach net/http/pprof to your routers | -| [RealIP] | Sets a http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP | -| [Recoverer] | Gracefully absorb panics and prints the stack trace | -| [RequestID] | Injects a request ID into the context of each request | -| [RedirectSlashes] | Redirect slashes on routing paths | -| [RouteHeaders] | Route handling for request headers | -| [SetHeader] | Short-hand middleware to set a response header key/value | -| [StripSlashes] | Strip slashes on routing paths | -| [Throttle] | Puts a ceiling on the number of concurrent requests | -| [Timeout] | Signals to the request context when the timeout deadline is reached | -| [URLFormat] | Parse extension from url and put it on request context | -| [WithValue] | Short-hand middleware to set a key/value on the request context | ----------------------------------------------------------------------------------------------------- - -[AllowContentEncoding]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentEncoding -[AllowContentType]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentType -[BasicAuth]: https://pkg.go.dev/github.com/go-chi/chi/middleware#BasicAuth -[Compress]: 
https://pkg.go.dev/github.com/go-chi/chi/middleware#Compress -[ContentCharset]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ContentCharset -[CleanPath]: https://pkg.go.dev/github.com/go-chi/chi/middleware#CleanPath -[GetHead]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetHead -[GetReqID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetReqID -[Heartbeat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Heartbeat -[Logger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Logger -[NoCache]: https://pkg.go.dev/github.com/go-chi/chi/middleware#NoCache -[Profiler]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Profiler -[RealIP]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RealIP -[Recoverer]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Recoverer -[RedirectSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RedirectSlashes -[RequestLogger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestLogger -[RequestID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestID -[RouteHeaders]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RouteHeaders -[SetHeader]: https://pkg.go.dev/github.com/go-chi/chi/middleware#SetHeader -[StripSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#StripSlashes -[Throttle]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Throttle -[ThrottleBacklog]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleBacklog -[ThrottleWithOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleWithOpts -[Timeout]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Timeout -[URLFormat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#URLFormat -[WithLogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithLogEntry -[WithValue]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithValue -[Compressor]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compressor -[DefaultLogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#DefaultLogFormatter -[EncoderFunc]: https://pkg.go.dev/github.com/go-chi/chi/middleware#EncoderFunc -[HeaderRoute]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRoute -[HeaderRouter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRouter -[LogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogEntry -[LogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogFormatter -[LoggerInterface]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LoggerInterface -[ThrottleOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleOpts -[WrapResponseWriter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WrapResponseWriter - -### Extra middlewares & packages - -Please see https://github.com/go-chi for additional packages. 
- --------------------------------------------------------------------------------------------------------------------- -| package | description | -|:---------------------------------------------------|:------------------------------------------------------------- -| [cors](https://github.com/go-chi/cors) | Cross-origin resource sharing (CORS) | -| [docgen](https://github.com/go-chi/docgen) | Print chi.Router routes at runtime | -| [jwtauth](https://github.com/go-chi/jwtauth) | JWT authentication | -| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing | -| [httplog](https://github.com/go-chi/httplog) | Small but powerful structured HTTP request logging | -| [httprate](https://github.com/go-chi/httprate) | HTTP request rate limiter | -| [httptracer](https://github.com/go-chi/httptracer) | HTTP request performance tracing library | -| [httpvcr](https://github.com/go-chi/httpvcr) | Write deterministic tests for external sources | -| [stampede](https://github.com/go-chi/stampede) | HTTP request coalescer | --------------------------------------------------------------------------------------------------------------------- - - -## context? - -`context` is a tiny pkg that provides simple interface to signal context across call stacks -and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani) -and is available in stdlib since go1.7. - -Learn more at https://blog.golang.org/context - -and.. -* Docs: https://golang.org/pkg/context -* Source: https://github.com/golang/go/tree/master/src/context - - -## Benchmarks - -The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark - -Results as of Nov 29, 2020 with Go 1.15.5 on Linux AMD 3950x - -```shell -BenchmarkChi_Param 3075895 384 ns/op 400 B/op 2 allocs/op -BenchmarkChi_Param5 2116603 566 ns/op 400 B/op 2 allocs/op -BenchmarkChi_Param20 964117 1227 ns/op 400 B/op 2 allocs/op -BenchmarkChi_ParamWrite 2863413 420 ns/op 400 B/op 2 allocs/op -BenchmarkChi_GithubStatic 3045488 395 ns/op 400 B/op 2 allocs/op -BenchmarkChi_GithubParam 2204115 540 ns/op 400 B/op 2 allocs/op -BenchmarkChi_GithubAll 10000 113811 ns/op 81203 B/op 406 allocs/op -BenchmarkChi_GPlusStatic 3337485 359 ns/op 400 B/op 2 allocs/op -BenchmarkChi_GPlusParam 2825853 423 ns/op 400 B/op 2 allocs/op -BenchmarkChi_GPlus2Params 2471697 483 ns/op 400 B/op 2 allocs/op -BenchmarkChi_GPlusAll 194220 5950 ns/op 5200 B/op 26 allocs/op -BenchmarkChi_ParseStatic 3365324 356 ns/op 400 B/op 2 allocs/op -BenchmarkChi_ParseParam 2976614 404 ns/op 400 B/op 2 allocs/op -BenchmarkChi_Parse2Params 2638084 439 ns/op 400 B/op 2 allocs/op -BenchmarkChi_ParseAll 109567 11295 ns/op 10400 B/op 52 allocs/op -BenchmarkChi_StaticAll 16846 71308 ns/op 62802 B/op 314 allocs/op -``` - -Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc - -NOTE: the allocs in the benchmark above are from the calls to http.Request's -`WithContext(context.Context)` method that clones the http.Request, sets the `Context()` -on the duplicated (alloc'd) request and returns it the new request object. This is just -how setting context on a request in Go works. 
- - -## Go module support & note on chi's versioning - -* Go.mod support means we reset our versioning starting from v1.5 (see [CHANGELOG](https://github.com/go-chi/chi/blob/master/CHANGELOG.md#v150-2020-11-12---now-with-gomod-support)) -* All older tags are preserved, are backwards-compatible and will "just work" as they -* Brand new systems can run `go get -u github.com/go-chi/chi` as normal, or `go get -u github.com/go-chi/chi@latest` -to install chi, which will install v1.x+ built with go.mod support, starting from v1.5.0. -* For existing projects who want to upgrade to the latest go.mod version, run: `go get -u github.com/go-chi/chi@v1.5.0`, -which will get you on the go.mod version line (as Go's mod cache may still remember v4.x). -* Any breaking changes will bump a "minor" release and backwards-compatible improvements/fixes will bump a "tiny" release. - - -## Credits - -* Carl Jackson for https://github.com/zenazn/goji - * Parts of chi's thinking comes from goji, and chi's middleware package - sources from goji. -* Armon Dadgar for https://github.com/armon/go-radix -* Contributions: [@VojtechVitek](https://github.com/VojtechVitek) - -We'll be more than happy to see [your contributions](./CONTRIBUTING.md)! - - -## Beyond REST - -chi is just a http router that lets you decompose request handling into many smaller layers. -Many companies use chi to write REST services for their public APIs. But, REST is just a convention -for managing state via HTTP, and there's a lot of other pieces required to write a complete client-server -system or network of microservices. - -Looking beyond REST, I also recommend some newer works in the field: -* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen -* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs -* [graphql](https://github.com/99designs/gqlgen) - Declarative query language -* [NATS](https://nats.io) - lightweight pub-sub - - -## License - -Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka) - -Licensed under [MIT License](./LICENSE) - -[GoDoc]: https://pkg.go.dev/github.com/go-chi/chi?tab=versions -[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg -[Travis]: https://travis-ci.org/go-chi/chi -[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master +# chi + + +[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis] + +`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's +especially good at helping you write large REST API services that are kept maintainable as your +project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to +handle signaling, cancelation and request-scoped values across a handler chain. + +The focus of the project has been to seek out an elegant and comfortable design for writing +REST API servers, written during the development of the Pressly API service that powers our +public API service, which in turn powers all of our client-side applications. + +The key considerations of chi's design are: project structure, maintainability, standard http +handlers (stdlib-only), developer productivity, and deconstructing a large system into many small +parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also +included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) +and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too! 
+ +## Install + +`go get -u github.com/go-chi/chi` + + +## Features + +* **Lightweight** - cloc'd in ~1000 LOC for the chi router +* **Fast** - yes, see [benchmarks](#benchmarks) +* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http` +* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and sub-router mounting +* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts +* **Robust** - in production at Pressly, CloudFlare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91)) +* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown +* **Go.mod support** - v1.x of chi (starting from v1.5.0), now has go.mod support (see [CHANGELOG](https://github.com/go-chi/chi/blob/master/CHANGELOG.md#v150-2020-11-12---now-with-gomod-support)) +* **No external dependencies** - plain ol' Go stdlib + net/http + + +## Examples + +See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples. + + +**As easy as:** + +```go +package main + +import ( + "net/http" + + "github.com/go-chi/chi" + "github.com/go-chi/chi/middleware" +) + +func main() { + r := chi.NewRouter() + r.Use(middleware.Logger) + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("welcome")) + }) + http.ListenAndServe(":3000", r) +} +``` + +**REST Preview:** + +Here is a little preview of how routing looks like with chi. Also take a look at the generated routing docs +in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in +Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)). + +I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed +above, they will show you all the features of chi and serve as a good form of documentation. + +```go +import ( + //... + "context" + "github.com/go-chi/chi" + "github.com/go-chi/chi/middleware" +) + +func main() { + r := chi.NewRouter() + + // A good base middleware stack + r.Use(middleware.RequestID) + r.Use(middleware.RealIP) + r.Use(middleware.Logger) + r.Use(middleware.Recoverer) + + // Set a timeout value on the request context (ctx), that will signal + // through ctx.Done() that the request has timed out and further + // processing should be stopped. 
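	// Note: Timeout only signals through the context; a long-running handler
	// must still select on ctx.Done() itself and return early when it fires.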
+ r.Use(middleware.Timeout(60 * time.Second)) + + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hi")) + }) + + // RESTy routes for "articles" resource + r.Route("/articles", func(r chi.Router) { + r.With(paginate).Get("/", listArticles) // GET /articles + r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017 + + r.Post("/", createArticle) // POST /articles + r.Get("/search", searchArticles) // GET /articles/search + + // Regexp url parameters: + r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto + + // Subrouters: + r.Route("/{articleID}", func(r chi.Router) { + r.Use(ArticleCtx) + r.Get("/", getArticle) // GET /articles/123 + r.Put("/", updateArticle) // PUT /articles/123 + r.Delete("/", deleteArticle) // DELETE /articles/123 + }) + }) + + // Mount the admin sub-router + r.Mount("/admin", adminRouter()) + + http.ListenAndServe(":3333", r) +} + +func ArticleCtx(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + articleID := chi.URLParam(r, "articleID") + article, err := dbGetArticle(articleID) + if err != nil { + http.Error(w, http.StatusText(404), 404) + return + } + ctx := context.WithValue(r.Context(), "article", article) + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func getArticle(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + article, ok := ctx.Value("article").(*Article) + if !ok { + http.Error(w, http.StatusText(422), 422) + return + } + w.Write([]byte(fmt.Sprintf("title:%s", article.Title))) +} + +// A completely separate router for administrator routes +func adminRouter() http.Handler { + r := chi.NewRouter() + r.Use(AdminOnly) + r.Get("/", adminIndex) + r.Get("/accounts", adminListAccounts) + return r +} + +func AdminOnly(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + perm, ok := ctx.Value("acl.permission").(YourPermissionType) + if !ok || !perm.IsAdmin() { + http.Error(w, http.StatusText(403), 403) + return + } + next.ServeHTTP(w, r) + }) +} +``` + + +## Router interface + +chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree). +The router is fully compatible with `net/http`. + +Built on top of the tree is the `Router` interface: + +```go +// Router consisting of the core routing methods used by chi's Mux, +// using only the standard net/http. +type Router interface { + http.Handler + Routes + + // Use appends one or more middlewares onto the Router stack. + Use(middlewares ...func(http.Handler) http.Handler) + + // With adds inline middlewares for an endpoint handler. + With(middlewares ...func(http.Handler) http.Handler) Router + + // Group adds a new inline-Router along the current routing + // path, with a fresh middleware stack for the inline-Router. + Group(fn func(r Router)) Router + + // Route mounts a sub-Router along a `pattern`` string. + Route(pattern string, fn func(r Router)) Router + + // Mount attaches another http.Handler along ./pattern/* + Mount(pattern string, h http.Handler) + + // Handle and HandleFunc adds routes for `pattern` that matches + // all HTTP methods. + Handle(pattern string, h http.Handler) + HandleFunc(pattern string, h http.HandlerFunc) + + // Method and MethodFunc adds routes for `pattern` that matches + // the `method` HTTP method. 
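	// For example: r.Method("GET", "/", h), as shown in _examples/custom-handler.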
+ Method(method, pattern string, h http.Handler) + MethodFunc(method, pattern string, h http.HandlerFunc) + + // HTTP-method routing along `pattern` + Connect(pattern string, h http.HandlerFunc) + Delete(pattern string, h http.HandlerFunc) + Get(pattern string, h http.HandlerFunc) + Head(pattern string, h http.HandlerFunc) + Options(pattern string, h http.HandlerFunc) + Patch(pattern string, h http.HandlerFunc) + Post(pattern string, h http.HandlerFunc) + Put(pattern string, h http.HandlerFunc) + Trace(pattern string, h http.HandlerFunc) + + // NotFound defines a handler to respond whenever a route could + // not be found. + NotFound(h http.HandlerFunc) + + // MethodNotAllowed defines a handler to respond whenever a method is + // not allowed. + MethodNotAllowed(h http.HandlerFunc) +} + +// Routes interface adds two methods for router traversal, which is also +// used by the github.com/go-chi/docgen package to generate documentation for Routers. +type Routes interface { + // Routes returns the routing tree in an easily traversable structure. + Routes() []Route + + // Middlewares returns the list of middlewares in use by the router. + Middlewares() Middlewares + + // Match searches the routing tree for a handler that matches + // the method/path - similar to routing a http request, but without + // executing the handler thereafter. + Match(rctx *Context, method, path string) bool +} +``` + +Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern +supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters +can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters +and `chi.URLParam(r, "*")` for a wildcard parameter. + + +### Middleware handlers + +chi's middlewares are just stdlib net/http middleware handlers. There is nothing special +about them, which means the router and all the tooling is designed to be compatible and +friendly with any middleware in the community. This offers much better extensibility and reuse +of packages and is at the heart of chi's purpose. + +Here is an example of a standard net/http middleware where we assign a context key `"user"` +the value of `"123"`. This middleware sets a hypothetical user identifier on the request +context and calls the next handler in the chain. + +```go +// HTTP middleware setting a value on the request context +func MyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // create new context from `r` request context, and assign key `"user"` + // to value of `"123"` + ctx := context.WithValue(r.Context(), "user", "123") + + // call the next handler in the chain, passing the response writer and + // the updated request object with the new context value. + // + // note: context.Context values are nested, so any previously set + // values will be accessible as well, and the new `"user"` key + // will be accessible from this point forward. + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} +``` + + +### Request handlers + +chi uses standard net/http request handlers. This little snippet is an example of a http.Handler +func that reads a user identifier from the request context - hypothetically, identifying +the user sending an authenticated request, validated+set by a previous middleware handler. + +```go +// HTTP handler accessing data from the request context. 
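// Note: the one-value type assertion below panics if MyMiddleware has not run;
// the two-value form `user, ok := r.Context().Value("user").(string)` lets the
// handler detect a missing value and fail gracefully instead.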
+func MyRequestHandler(w http.ResponseWriter, r *http.Request) { + // here we read from the request context and fetch out `"user"` key set in + // the MyMiddleware example above. + user := r.Context().Value("user").(string) + + // respond to the client + w.Write([]byte(fmt.Sprintf("hi %s", user))) +} +``` + + +### URL parameters + +chi's router parses and stores URL parameters right onto the request context. Here is +an example of how to access URL params in your net/http handlers. And of course, middlewares +are able to access the same information. + +```go +// HTTP handler accessing the url routing parameters. +func MyRequestHandler(w http.ResponseWriter, r *http.Request) { + // fetch the url parameter `"userID"` from the request of a matching + // routing pattern. An example routing pattern could be: /users/{userID} + userID := chi.URLParam(r, "userID") + + // fetch `"key"` from the request context + ctx := r.Context() + key := ctx.Value("key").(string) + + // respond to the client + w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key))) +} +``` + + +## Middlewares + +chi comes equipped with an optional `middleware` package, providing a suite of standard +`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible +with `net/http` can be used with chi's mux. + +### Core middlewares + +---------------------------------------------------------------------------------------------------- +| chi/middleware Handler | description | +| :--------------------- | :---------------------------------------------------------------------- | +| [AllowContentEncoding] | Enforces a whitelist of request Content-Encoding headers | +| [AllowContentType] | Explicit whitelist of accepted request Content-Types | +| [BasicAuth] | Basic HTTP authentication | +| [Compress] | Gzip compression for clients that accept compressed responses | +| [ContentCharset] | Ensure charset for Content-Type request headers | +| [CleanPath] | Clean double slashes from request path | +| [GetHead] | Automatically route undefined HEAD requests to GET handlers | +| [Heartbeat] | Monitoring endpoint to check the servers pulse | +| [Logger] | Logs the start and end of each request with the elapsed processing time | +| [NoCache] | Sets response headers to prevent clients from caching | +| [Profiler] | Easily attach net/http/pprof to your routers | +| [RealIP] | Sets a http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP | +| [Recoverer] | Gracefully absorb panics and prints the stack trace | +| [RequestID] | Injects a request ID into the context of each request | +| [RedirectSlashes] | Redirect slashes on routing paths | +| [RouteHeaders] | Route handling for request headers | +| [SetHeader] | Short-hand middleware to set a response header key/value | +| [StripSlashes] | Strip slashes on routing paths | +| [Throttle] | Puts a ceiling on the number of concurrent requests | +| [Timeout] | Signals to the request context when the timeout deadline is reached | +| [URLFormat] | Parse extension from url and put it on request context | +| [WithValue] | Short-hand middleware to set a key/value on the request context | +---------------------------------------------------------------------------------------------------- + +[AllowContentEncoding]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentEncoding +[AllowContentType]: https://pkg.go.dev/github.com/go-chi/chi/middleware#AllowContentType +[BasicAuth]: https://pkg.go.dev/github.com/go-chi/chi/middleware#BasicAuth +[Compress]: 
https://pkg.go.dev/github.com/go-chi/chi/middleware#Compress +[ContentCharset]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ContentCharset +[CleanPath]: https://pkg.go.dev/github.com/go-chi/chi/middleware#CleanPath +[GetHead]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetHead +[GetReqID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#GetReqID +[Heartbeat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Heartbeat +[Logger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Logger +[NoCache]: https://pkg.go.dev/github.com/go-chi/chi/middleware#NoCache +[Profiler]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Profiler +[RealIP]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RealIP +[Recoverer]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Recoverer +[RedirectSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RedirectSlashes +[RequestLogger]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestLogger +[RequestID]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RequestID +[RouteHeaders]: https://pkg.go.dev/github.com/go-chi/chi/middleware#RouteHeaders +[SetHeader]: https://pkg.go.dev/github.com/go-chi/chi/middleware#SetHeader +[StripSlashes]: https://pkg.go.dev/github.com/go-chi/chi/middleware#StripSlashes +[Throttle]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Throttle +[ThrottleBacklog]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleBacklog +[ThrottleWithOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleWithOpts +[Timeout]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Timeout +[URLFormat]: https://pkg.go.dev/github.com/go-chi/chi/middleware#URLFormat +[WithLogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithLogEntry +[WithValue]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WithValue +[Compressor]: https://pkg.go.dev/github.com/go-chi/chi/middleware#Compressor +[DefaultLogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#DefaultLogFormatter +[EncoderFunc]: https://pkg.go.dev/github.com/go-chi/chi/middleware#EncoderFunc +[HeaderRoute]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRoute +[HeaderRouter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#HeaderRouter +[LogEntry]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogEntry +[LogFormatter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LogFormatter +[LoggerInterface]: https://pkg.go.dev/github.com/go-chi/chi/middleware#LoggerInterface +[ThrottleOpts]: https://pkg.go.dev/github.com/go-chi/chi/middleware#ThrottleOpts +[WrapResponseWriter]: https://pkg.go.dev/github.com/go-chi/chi/middleware#WrapResponseWriter + +### Extra middlewares & packages + +Please see https://github.com/go-chi for additional packages. 
+ +-------------------------------------------------------------------------------------------------------------------- +| package | description | +|:---------------------------------------------------|:------------------------------------------------------------- +| [cors](https://github.com/go-chi/cors) | Cross-origin resource sharing (CORS) | +| [docgen](https://github.com/go-chi/docgen) | Print chi.Router routes at runtime | +| [jwtauth](https://github.com/go-chi/jwtauth) | JWT authentication | +| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing | +| [httplog](https://github.com/go-chi/httplog) | Small but powerful structured HTTP request logging | +| [httprate](https://github.com/go-chi/httprate) | HTTP request rate limiter | +| [httptracer](https://github.com/go-chi/httptracer) | HTTP request performance tracing library | +| [httpvcr](https://github.com/go-chi/httpvcr) | Write deterministic tests for external sources | +| [stampede](https://github.com/go-chi/stampede) | HTTP request coalescer | +-------------------------------------------------------------------------------------------------------------------- + + +## context? + +`context` is a tiny pkg that provides simple interface to signal context across call stacks +and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani) +and is available in stdlib since go1.7. + +Learn more at https://blog.golang.org/context + +and.. +* Docs: https://golang.org/pkg/context +* Source: https://github.com/golang/go/tree/master/src/context + + +## Benchmarks + +The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark + +Results as of Nov 29, 2020 with Go 1.15.5 on Linux AMD 3950x + +```shell +BenchmarkChi_Param 3075895 384 ns/op 400 B/op 2 allocs/op +BenchmarkChi_Param5 2116603 566 ns/op 400 B/op 2 allocs/op +BenchmarkChi_Param20 964117 1227 ns/op 400 B/op 2 allocs/op +BenchmarkChi_ParamWrite 2863413 420 ns/op 400 B/op 2 allocs/op +BenchmarkChi_GithubStatic 3045488 395 ns/op 400 B/op 2 allocs/op +BenchmarkChi_GithubParam 2204115 540 ns/op 400 B/op 2 allocs/op +BenchmarkChi_GithubAll 10000 113811 ns/op 81203 B/op 406 allocs/op +BenchmarkChi_GPlusStatic 3337485 359 ns/op 400 B/op 2 allocs/op +BenchmarkChi_GPlusParam 2825853 423 ns/op 400 B/op 2 allocs/op +BenchmarkChi_GPlus2Params 2471697 483 ns/op 400 B/op 2 allocs/op +BenchmarkChi_GPlusAll 194220 5950 ns/op 5200 B/op 26 allocs/op +BenchmarkChi_ParseStatic 3365324 356 ns/op 400 B/op 2 allocs/op +BenchmarkChi_ParseParam 2976614 404 ns/op 400 B/op 2 allocs/op +BenchmarkChi_Parse2Params 2638084 439 ns/op 400 B/op 2 allocs/op +BenchmarkChi_ParseAll 109567 11295 ns/op 10400 B/op 52 allocs/op +BenchmarkChi_StaticAll 16846 71308 ns/op 62802 B/op 314 allocs/op +``` + +Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc + +NOTE: the allocs in the benchmark above are from the calls to http.Request's +`WithContext(context.Context)` method that clones the http.Request, sets the `Context()` +on the duplicated (alloc'd) request and returns it the new request object. This is just +how setting context on a request in Go works. 
+ + +## Go module support & note on chi's versioning + +* Go.mod support means we reset our versioning starting from v1.5 (see [CHANGELOG](https://github.com/go-chi/chi/blob/master/CHANGELOG.md#v150-2020-11-12---now-with-gomod-support)) +* All older tags are preserved, are backwards-compatible and will "just work" as they always have +* Brand new systems can run `go get -u github.com/go-chi/chi` as normal, or `go get -u github.com/go-chi/chi@latest` +to install chi, which will install v1.x+ built with go.mod support, starting from v1.5.0. +* For existing projects that want to upgrade to the latest go.mod version, run `go get -u github.com/go-chi/chi@v1.5.0`, +which will get you on the go.mod version line (as Go's mod cache may still remember v4.x). +* Any breaking changes will bump a "minor" release and backwards-compatible improvements/fixes will bump a "tiny" release. + + +## Credits + +* Carl Jackson for https://github.com/zenazn/goji + * Parts of chi's thinking come from goji, and chi's middleware package + sources from goji. +* Armon Dadgar for https://github.com/armon/go-radix +* Contributions: [@VojtechVitek](https://github.com/VojtechVitek) + +We'll be more than happy to see [your contributions](./CONTRIBUTING.md)! + + +## Beyond REST + +chi is just an http router that lets you decompose request handling into many smaller layers. +Many companies use chi to write REST services for their public APIs. But REST is just a convention +for managing state via HTTP, and there are a lot of other pieces required to write a complete client-server +system or network of microservices. + +Looking beyond REST, I also recommend some newer works in the field: +* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen +* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs +* [graphql](https://github.com/99designs/gqlgen) - Declarative query language +* [NATS](https://nats.io) - lightweight pub-sub + + +## License + +Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka) + +Licensed under [MIT License](./LICENSE) + +[GoDoc]: https://pkg.go.dev/github.com/go-chi/chi?tab=versions +[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg +[Travis]: https://travis-ci.org/go-chi/chi +[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master diff --git a/vendor/github.com/go-chi/chi/chain.go b/vendor/github.com/go-chi/chi/chain.go index 88e6846138..a8e7384633 100644 --- a/vendor/github.com/go-chi/chi/chain.go +++ b/vendor/github.com/go-chi/chi/chain.go @@ -1,49 +1,49 @@ -package chi - -import "net/http" - -// Chain returns a Middlewares type from a slice of middleware handlers. -func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares { - return Middlewares(middlewares) -} - -// Handler builds and returns a http.Handler from the chain of middlewares, -// with `h http.Handler` as the final handler. -func (mws Middlewares) Handler(h http.Handler) http.Handler { - return &ChainHandler{mws, h, chain(mws, h)} -} - -// HandlerFunc builds and returns a http.Handler from the chain of middlewares, -// with `h http.Handler` as the final handler. -func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler { - return &ChainHandler{mws, h, chain(mws, h)} -} - -// ChainHandler is a http.Handler with support for handler composition and -// execution.
-type ChainHandler struct { - Middlewares Middlewares - Endpoint http.Handler - chain http.Handler -} - -func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - c.chain.ServeHTTP(w, r) -} - -// chain builds a http.Handler composed of an inline middleware stack and endpoint -// handler in the order they are passed. -func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler { - // Return ahead of time if there aren't any middlewares for the chain - if len(middlewares) == 0 { - return endpoint - } - - // Wrap the end handler with the middleware chain - h := middlewares[len(middlewares)-1](endpoint) - for i := len(middlewares) - 2; i >= 0; i-- { - h = middlewares[i](h) - } - - return h -} +package chi + +import "net/http" + +// Chain returns a Middlewares type from a slice of middleware handlers. +func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares { + return Middlewares(middlewares) +} + +// Handler builds and returns a http.Handler from the chain of middlewares, +// with `h http.Handler` as the final handler. +func (mws Middlewares) Handler(h http.Handler) http.Handler { + return &ChainHandler{mws, h, chain(mws, h)} +} + +// HandlerFunc builds and returns a http.Handler from the chain of middlewares, +// with `h http.Handler` as the final handler. +func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler { + return &ChainHandler{mws, h, chain(mws, h)} +} + +// ChainHandler is a http.Handler with support for handler composition and +// execution. +type ChainHandler struct { + Middlewares Middlewares + Endpoint http.Handler + chain http.Handler +} + +func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + c.chain.ServeHTTP(w, r) +} + +// chain builds a http.Handler composed of an inline middleware stack and endpoint +// handler in the order they are passed. +func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler { + // Return ahead of time if there aren't any middlewares for the chain + if len(middlewares) == 0 { + return endpoint + } + + // Wrap the end handler with the middleware chain + h := middlewares[len(middlewares)-1](endpoint) + for i := len(middlewares) - 2; i >= 0; i-- { + h = middlewares[i](h) + } + + return h +} diff --git a/vendor/github.com/go-chi/chi/chi.go b/vendor/github.com/go-chi/chi/chi.go index b7063dc297..5872efb17f 100644 --- a/vendor/github.com/go-chi/chi/chi.go +++ b/vendor/github.com/go-chi/chi/chi.go @@ -1,134 +1,134 @@ -// -// Package chi is a small, idiomatic and composable router for building HTTP services. -// -// chi requires Go 1.10 or newer. -// -// Example: -// package main -// -// import ( -// "net/http" -// -// "github.com/go-chi/chi" -// "github.com/go-chi/chi/middleware" -// ) -// -// func main() { -// r := chi.NewRouter() -// r.Use(middleware.Logger) -// r.Use(middleware.Recoverer) -// -// r.Get("/", func(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("root.")) -// }) -// -// http.ListenAndServe(":3333", r) -// } -// -// See github.com/go-chi/chi/_examples/ for more in-depth examples. -// -// URL patterns allow for easy matching of path components in HTTP -// requests. The matching components can then be accessed using -// chi.URLParam(). All patterns must begin with a slash. -// -// A simple named placeholder {name} matches any sequence of characters -// up to the next / or the end of the URL. Trailing slashes on paths must -// be handled explicitly. 
-// -// A placeholder with a name followed by a colon allows a regular -// expression match, for example {number:\\d+}. The regular expression -// syntax is Go's normal regexp RE2 syntax, except that regular expressions -// including { or } are not supported, and / will never be -// matched. An anonymous regexp pattern is allowed, using an empty string -// before the colon in the placeholder, such as {:\\d+} -// -// The special placeholder of asterisk matches the rest of the requested -// URL. Any trailing characters in the pattern are ignored. This is the only -// placeholder which will match / characters. -// -// Examples: -// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/" -// "/user/{name}/info" matches "/user/jsmith/info" -// "/page/*" matches "/page/intro/latest" -// "/page/*/index" also matches "/page/intro/latest" -// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01" -// -package chi - -import "net/http" - -// NewRouter returns a new Mux object that implements the Router interface. -func NewRouter() *Mux { - return NewMux() -} - -// Router consisting of the core routing methods used by chi's Mux, -// using only the standard net/http. -type Router interface { - http.Handler - Routes - - // Use appends one or more middlewares onto the Router stack. - Use(middlewares ...func(http.Handler) http.Handler) - - // With adds inline middlewares for an endpoint handler. - With(middlewares ...func(http.Handler) http.Handler) Router - - // Group adds a new inline-Router along the current routing - // path, with a fresh middleware stack for the inline-Router. - Group(fn func(r Router)) Router - - // Route mounts a sub-Router along a `pattern`` string. - Route(pattern string, fn func(r Router)) Router - - // Mount attaches another http.Handler along ./pattern/* - Mount(pattern string, h http.Handler) - - // Handle and HandleFunc adds routes for `pattern` that matches - // all HTTP methods. - Handle(pattern string, h http.Handler) - HandleFunc(pattern string, h http.HandlerFunc) - - // Method and MethodFunc adds routes for `pattern` that matches - // the `method` HTTP method. - Method(method, pattern string, h http.Handler) - MethodFunc(method, pattern string, h http.HandlerFunc) - - // HTTP-method routing along `pattern` - Connect(pattern string, h http.HandlerFunc) - Delete(pattern string, h http.HandlerFunc) - Get(pattern string, h http.HandlerFunc) - Head(pattern string, h http.HandlerFunc) - Options(pattern string, h http.HandlerFunc) - Patch(pattern string, h http.HandlerFunc) - Post(pattern string, h http.HandlerFunc) - Put(pattern string, h http.HandlerFunc) - Trace(pattern string, h http.HandlerFunc) - - // NotFound defines a handler to respond whenever a route could - // not be found. - NotFound(h http.HandlerFunc) - - // MethodNotAllowed defines a handler to respond whenever a method is - // not allowed. - MethodNotAllowed(h http.HandlerFunc) -} - -// Routes interface adds two methods for router traversal, which is also -// used by the `docgen` subpackage to generation documentation for Routers. -type Routes interface { - // Routes returns the routing tree in an easily traversable structure. - Routes() []Route - - // Middlewares returns the list of middlewares in use by the router. - Middlewares() Middlewares - - // Match searches the routing tree for a handler that matches - // the method/path - similar to routing a http request, but without - // executing the handler thereafter. 
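// A hedged sketch of probing the routing tree with Match; no handler is
// executed, only the lookup is performed:
//
//	rctx := NewRouteContext()
//	if r.Match(rctx, "GET", "/user/jsmith") {
//		// a GET handler is registered for this path
//	}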
- Match(rctx *Context, method, path string) bool -} - -// Middlewares type is a slice of standard middleware handlers with methods -// to compose middleware chains and http.Handler's. -type Middlewares []func(http.Handler) http.Handler +// +// Package chi is a small, idiomatic and composable router for building HTTP services. +// +// chi requires Go 1.10 or newer. +// +// Example: +// package main +// +// import ( +// "net/http" +// +// "github.com/go-chi/chi" +// "github.com/go-chi/chi/middleware" +// ) +// +// func main() { +// r := chi.NewRouter() +// r.Use(middleware.Logger) +// r.Use(middleware.Recoverer) +// +// r.Get("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("root.")) +// }) +// +// http.ListenAndServe(":3333", r) +// } +// +// See github.com/go-chi/chi/_examples/ for more in-depth examples. +// +// URL patterns allow for easy matching of path components in HTTP +// requests. The matching components can then be accessed using +// chi.URLParam(). All patterns must begin with a slash. +// +// A simple named placeholder {name} matches any sequence of characters +// up to the next / or the end of the URL. Trailing slashes on paths must +// be handled explicitly. +// +// A placeholder with a name followed by a colon allows a regular +// expression match, for example {number:\\d+}. The regular expression +// syntax is Go's normal regexp RE2 syntax, except that regular expressions +// including { or } are not supported, and / will never be +// matched. An anonymous regexp pattern is allowed, using an empty string +// before the colon in the placeholder, such as {:\\d+} +// +// The special placeholder of asterisk matches the rest of the requested +// URL. Any trailing characters in the pattern are ignored. This is the only +// placeholder which will match / characters. +// +// Examples: +// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/" +// "/user/{name}/info" matches "/user/jsmith/info" +// "/page/*" matches "/page/intro/latest" +// "/page/*/index" also matches "/page/intro/latest" +// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01" +// +package chi + +import "net/http" + +// NewRouter returns a new Mux object that implements the Router interface. +func NewRouter() *Mux { + return NewMux() +} + +// Router consisting of the core routing methods used by chi's Mux, +// using only the standard net/http. +type Router interface { + http.Handler + Routes + + // Use appends one or more middlewares onto the Router stack. + Use(middlewares ...func(http.Handler) http.Handler) + + // With adds inline middlewares for an endpoint handler. + With(middlewares ...func(http.Handler) http.Handler) Router + + // Group adds a new inline-Router along the current routing + // path, with a fresh middleware stack for the inline-Router. + Group(fn func(r Router)) Router + + // Route mounts a sub-Router along a `pattern`` string. + Route(pattern string, fn func(r Router)) Router + + // Mount attaches another http.Handler along ./pattern/* + Mount(pattern string, h http.Handler) + + // Handle and HandleFunc adds routes for `pattern` that matches + // all HTTP methods. + Handle(pattern string, h http.Handler) + HandleFunc(pattern string, h http.HandlerFunc) + + // Method and MethodFunc adds routes for `pattern` that matches + // the `method` HTTP method. 
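// Illustrative use (healthHandler is a hypothetical http.Handler):
//
//	r.Method("GET", "/healthz", healthHandler)
//	r.MethodFunc("POST", "/jobs", func(w http.ResponseWriter, r *http.Request) {
//		w.WriteHeader(http.StatusAccepted)
//	})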
+ Method(method, pattern string, h http.Handler) + MethodFunc(method, pattern string, h http.HandlerFunc) + + // HTTP-method routing along `pattern` + Connect(pattern string, h http.HandlerFunc) + Delete(pattern string, h http.HandlerFunc) + Get(pattern string, h http.HandlerFunc) + Head(pattern string, h http.HandlerFunc) + Options(pattern string, h http.HandlerFunc) + Patch(pattern string, h http.HandlerFunc) + Post(pattern string, h http.HandlerFunc) + Put(pattern string, h http.HandlerFunc) + Trace(pattern string, h http.HandlerFunc) + + // NotFound defines a handler to respond whenever a route could + // not be found. + NotFound(h http.HandlerFunc) + + // MethodNotAllowed defines a handler to respond whenever a method is + // not allowed. + MethodNotAllowed(h http.HandlerFunc) +} + +// Routes interface adds two methods for router traversal, which is also +// used by the `docgen` subpackage to generation documentation for Routers. +type Routes interface { + // Routes returns the routing tree in an easily traversable structure. + Routes() []Route + + // Middlewares returns the list of middlewares in use by the router. + Middlewares() Middlewares + + // Match searches the routing tree for a handler that matches + // the method/path - similar to routing a http request, but without + // executing the handler thereafter. + Match(rctx *Context, method, path string) bool +} + +// Middlewares type is a slice of standard middleware handlers with methods +// to compose middleware chains and http.Handler's. +type Middlewares []func(http.Handler) http.Handler diff --git a/vendor/github.com/go-chi/chi/context.go b/vendor/github.com/go-chi/chi/context.go index 8c97f214a9..9f60d0a282 100644 --- a/vendor/github.com/go-chi/chi/context.go +++ b/vendor/github.com/go-chi/chi/context.go @@ -1,157 +1,157 @@ -package chi - -import ( - "context" - "net/http" - "strings" -) - -// URLParam returns the url parameter from a http.Request object. -func URLParam(r *http.Request, key string) string { - if rctx := RouteContext(r.Context()); rctx != nil { - return rctx.URLParam(key) - } - return "" -} - -// URLParamFromCtx returns the url parameter from a http.Request Context. -func URLParamFromCtx(ctx context.Context, key string) string { - if rctx := RouteContext(ctx); rctx != nil { - return rctx.URLParam(key) - } - return "" -} - -// RouteContext returns chi's routing Context object from a -// http.Request Context. -func RouteContext(ctx context.Context) *Context { - val, _ := ctx.Value(RouteCtxKey).(*Context) - return val -} - -// NewRouteContext returns a new routing Context object. -func NewRouteContext() *Context { - return &Context{} -} - -var ( - // RouteCtxKey is the context.Context key to store the request context. - RouteCtxKey = &contextKey{"RouteContext"} -) - -// Context is the default routing context set on the root node of a -// request context to track route patterns, URL parameters and -// an optional routing path. -type Context struct { - Routes Routes - - // Routing path/method override used during the route search. - // See Mux#routeHTTP method. - RoutePath string - RouteMethod string - - // Routing pattern stack throughout the lifecycle of the request, - // across all connected routers. It is a record of all matching - // patterns across a stack of sub-routers. - RoutePatterns []string - - // URLParams are the stack of routeParams captured during the - // routing lifecycle across a stack of sub-routers. 
- URLParams RouteParams - - // The endpoint routing pattern that matched the request URI path - // or `RoutePath` of the current sub-router. This value will update - // during the lifecycle of a request passing through a stack of - // sub-routers. - routePattern string - - // Route parameters matched for the current sub-router. It is - // intentionally unexported so it cant be tampered. - routeParams RouteParams - - // methodNotAllowed hint - methodNotAllowed bool - - // parentCtx is the parent of this one, for using Context as a - // context.Context directly. This is an optimization that saves - // 1 allocation. - parentCtx context.Context -} - -// Reset a routing context to its initial state. -func (x *Context) Reset() { - x.Routes = nil - x.RoutePath = "" - x.RouteMethod = "" - x.RoutePatterns = x.RoutePatterns[:0] - x.URLParams.Keys = x.URLParams.Keys[:0] - x.URLParams.Values = x.URLParams.Values[:0] - - x.routePattern = "" - x.routeParams.Keys = x.routeParams.Keys[:0] - x.routeParams.Values = x.routeParams.Values[:0] - x.methodNotAllowed = false - x.parentCtx = nil -} - -// URLParam returns the corresponding URL parameter value from the request -// routing context. -func (x *Context) URLParam(key string) string { - for k := len(x.URLParams.Keys) - 1; k >= 0; k-- { - if x.URLParams.Keys[k] == key { - return x.URLParams.Values[k] - } - } - return "" -} - -// RoutePattern builds the routing pattern string for the particular -// request, at the particular point during routing. This means, the value -// will change throughout the execution of a request in a router. That is -// why its advised to only use this value after calling the next handler. -// -// For example, -// -// func Instrument(next http.Handler) http.Handler { -// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -// next.ServeHTTP(w, r) -// routePattern := chi.RouteContext(r.Context()).RoutePattern() -// measure(w, r, routePattern) -// }) -// } -func (x *Context) RoutePattern() string { - routePattern := strings.Join(x.RoutePatterns, "") - return replaceWildcards(routePattern) -} - -// replaceWildcards takes a route pattern and recursively replaces all -// occurrences of "/*/" to "/". -func replaceWildcards(p string) string { - if strings.Contains(p, "/*/") { - return replaceWildcards(strings.Replace(p, "/*/", "/", -1)) - } - - return p -} - -// RouteParams is a structure to track URL routing parameters efficiently. -type RouteParams struct { - Keys, Values []string -} - -// Add will append a URL parameter to the end of the route param -func (s *RouteParams) Add(key, value string) { - s.Keys = append(s.Keys, key) - s.Values = append(s.Values, value) -} - -// contextKey is a value for use with context.WithValue. It's used as -// a pointer so it fits in an interface{} without allocation. This technique -// for defining context keys was copied from Go 1.7's new use of context in net/http. -type contextKey struct { - name string -} - -func (k *contextKey) String() string { - return "chi context value " + k.name -} +package chi + +import ( + "context" + "net/http" + "strings" +) + +// URLParam returns the url parameter from a http.Request object. +func URLParam(r *http.Request, key string) string { + if rctx := RouteContext(r.Context()); rctx != nil { + return rctx.URLParam(key) + } + return "" +} + +// URLParamFromCtx returns the url parameter from a http.Request Context. 
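// Hedged example: when only a context.Context is at hand (for instance deep
// in a service layer), the same parameter can be read without the request:
//
//	userID := URLParamFromCtx(r.Context(), "userID")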
+func URLParamFromCtx(ctx context.Context, key string) string { + if rctx := RouteContext(ctx); rctx != nil { + return rctx.URLParam(key) + } + return "" +} + +// RouteContext returns chi's routing Context object from a +// http.Request Context. +func RouteContext(ctx context.Context) *Context { + val, _ := ctx.Value(RouteCtxKey).(*Context) + return val +} + +// NewRouteContext returns a new routing Context object. +func NewRouteContext() *Context { + return &Context{} +} + +var ( + // RouteCtxKey is the context.Context key to store the request context. + RouteCtxKey = &contextKey{"RouteContext"} +) + +// Context is the default routing context set on the root node of a +// request context to track route patterns, URL parameters and +// an optional routing path. +type Context struct { + Routes Routes + + // Routing path/method override used during the route search. + // See Mux#routeHTTP method. + RoutePath string + RouteMethod string + + // Routing pattern stack throughout the lifecycle of the request, + // across all connected routers. It is a record of all matching + // patterns across a stack of sub-routers. + RoutePatterns []string + + // URLParams are the stack of routeParams captured during the + // routing lifecycle across a stack of sub-routers. + URLParams RouteParams + + // The endpoint routing pattern that matched the request URI path + // or `RoutePath` of the current sub-router. This value will update + // during the lifecycle of a request passing through a stack of + // sub-routers. + routePattern string + + // Route parameters matched for the current sub-router. It is + // intentionally unexported so it cant be tampered. + routeParams RouteParams + + // methodNotAllowed hint + methodNotAllowed bool + + // parentCtx is the parent of this one, for using Context as a + // context.Context directly. This is an optimization that saves + // 1 allocation. + parentCtx context.Context +} + +// Reset a routing context to its initial state. +func (x *Context) Reset() { + x.Routes = nil + x.RoutePath = "" + x.RouteMethod = "" + x.RoutePatterns = x.RoutePatterns[:0] + x.URLParams.Keys = x.URLParams.Keys[:0] + x.URLParams.Values = x.URLParams.Values[:0] + + x.routePattern = "" + x.routeParams.Keys = x.routeParams.Keys[:0] + x.routeParams.Values = x.routeParams.Values[:0] + x.methodNotAllowed = false + x.parentCtx = nil +} + +// URLParam returns the corresponding URL parameter value from the request +// routing context. +func (x *Context) URLParam(key string) string { + for k := len(x.URLParams.Keys) - 1; k >= 0; k-- { + if x.URLParams.Keys[k] == key { + return x.URLParams.Values[k] + } + } + return "" +} + +// RoutePattern builds the routing pattern string for the particular +// request, at the particular point during routing. This means, the value +// will change throughout the execution of a request in a router. That is +// why its advised to only use this value after calling the next handler. +// +// For example, +// +// func Instrument(next http.Handler) http.Handler { +// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +// next.ServeHTTP(w, r) +// routePattern := chi.RouteContext(r.Context()).RoutePattern() +// measure(w, r, routePattern) +// }) +// } +func (x *Context) RoutePattern() string { + routePattern := strings.Join(x.RoutePatterns, "") + return replaceWildcards(routePattern) +} + +// replaceWildcards takes a route pattern and recursively replaces all +// occurrences of "/*/" to "/". 
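// For example, a pattern recorded as "/api/*/users/{id}" across a mounted
// sub-router collapses to "/api/users/{id}".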
+func replaceWildcards(p string) string { + if strings.Contains(p, "/*/") { + return replaceWildcards(strings.Replace(p, "/*/", "/", -1)) + } + + return p +} + +// RouteParams is a structure to track URL routing parameters efficiently. +type RouteParams struct { + Keys, Values []string +} + +// Add will append a URL parameter to the end of the route param +func (s *RouteParams) Add(key, value string) { + s.Keys = append(s.Keys, key) + s.Values = append(s.Values, value) +} + +// contextKey is a value for use with context.WithValue. It's used as +// a pointer so it fits in an interface{} without allocation. This technique +// for defining context keys was copied from Go 1.7's new use of context in net/http. +type contextKey struct { + name string +} + +func (k *contextKey) String() string { + return "chi context value " + k.name +} diff --git a/vendor/github.com/go-chi/chi/mux.go b/vendor/github.com/go-chi/chi/mux.go index 146643b04c..793fb880fc 100644 --- a/vendor/github.com/go-chi/chi/mux.go +++ b/vendor/github.com/go-chi/chi/mux.go @@ -1,479 +1,479 @@ -package chi - -import ( - "context" - "fmt" - "net/http" - "strings" - "sync" -) - -var _ Router = &Mux{} - -// Mux is a simple HTTP route multiplexer that parses a request path, -// records any URL params, and executes an end handler. It implements -// the http.Handler interface and is friendly with the standard library. -// -// Mux is designed to be fast, minimal and offer a powerful API for building -// modular and composable HTTP services with a large set of handlers. It's -// particularly useful for writing large REST API services that break a handler -// into many smaller parts composed of middlewares and end handlers. -type Mux struct { - // The radix trie router - tree *node - - // The middleware stack - middlewares []func(http.Handler) http.Handler - - // Controls the behaviour of middleware chain generation when a mux - // is registered as an inline group inside another mux. - inline bool - parent *Mux - - // The computed mux handler made of the chained middleware stack and - // the tree router - handler http.Handler - - // Routing context pool - pool *sync.Pool - - // Custom route not found handler - notFoundHandler http.HandlerFunc - - // Custom method not allowed handler - methodNotAllowedHandler http.HandlerFunc -} - -// NewMux returns a newly initialized Mux object that implements the Router -// interface. -func NewMux() *Mux { - mux := &Mux{tree: &node{}, pool: &sync.Pool{}} - mux.pool.New = func() interface{} { - return NewRouteContext() - } - return mux -} - -// ServeHTTP is the single method of the http.Handler interface that makes -// Mux interoperable with the standard library. It uses a sync.Pool to get and -// reuse routing contexts for each request. -func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // Ensure the mux has some routes defined on the mux - if mx.handler == nil { - mx.NotFoundHandler().ServeHTTP(w, r) - return - } - - // Check if a routing context already exists from a parent router. - rctx, _ := r.Context().Value(RouteCtxKey).(*Context) - if rctx != nil { - mx.handler.ServeHTTP(w, r) - return - } - - // Fetch a RouteContext object from the sync pool, and call the computed - // mx.handler that is comprised of mx.middlewares + mx.routeHTTP. - // Once the request is finished, reset the routing context and put it back - // into the pool for reuse from another request. 
- rctx = mx.pool.Get().(*Context) - rctx.Reset() - rctx.Routes = mx - rctx.parentCtx = r.Context() - - // NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation - r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx)) - - // Serve the request and once its done, put the request context back in the sync pool - mx.handler.ServeHTTP(w, r) - mx.pool.Put(rctx) -} - -// Use appends a middleware handler to the Mux middleware stack. -// -// The middleware stack for any Mux will execute before searching for a matching -// route to a specific handler, which provides opportunity to respond early, -// change the course of the request execution, or set request-scoped values for -// the next http.Handler. -func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) { - if mx.handler != nil { - panic("chi: all middlewares must be defined before routes on a mux") - } - mx.middlewares = append(mx.middlewares, middlewares...) -} - -// Handle adds the route `pattern` that matches any http method to -// execute the `handler` http.Handler. -func (mx *Mux) Handle(pattern string, handler http.Handler) { - mx.handle(mALL, pattern, handler) -} - -// HandleFunc adds the route `pattern` that matches any http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mALL, pattern, handlerFn) -} - -// Method adds the route `pattern` that matches `method` http method to -// execute the `handler` http.Handler. -func (mx *Mux) Method(method, pattern string, handler http.Handler) { - m, ok := methodMap[strings.ToUpper(method)] - if !ok { - panic(fmt.Sprintf("chi: '%s' http method is not supported.", method)) - } - mx.handle(m, pattern, handler) -} - -// MethodFunc adds the route `pattern` that matches `method` http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) { - mx.Method(method, pattern, handlerFn) -} - -// Connect adds the route `pattern` that matches a CONNECT http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mCONNECT, pattern, handlerFn) -} - -// Delete adds the route `pattern` that matches a DELETE http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mDELETE, pattern, handlerFn) -} - -// Get adds the route `pattern` that matches a GET http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mGET, pattern, handlerFn) -} - -// Head adds the route `pattern` that matches a HEAD http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mHEAD, pattern, handlerFn) -} - -// Options adds the route `pattern` that matches a OPTIONS http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mOPTIONS, pattern, handlerFn) -} - -// Patch adds the route `pattern` that matches a PATCH http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mPATCH, pattern, handlerFn) -} - -// Post adds the route `pattern` that matches a POST http method to -// execute the `handlerFn` http.HandlerFunc. 
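// Illustrative use of the verb helpers (the handler names are hypothetical):
//
//	r.Get("/notes", listNotes)
//	r.Post("/notes", createNote)
//	r.Delete("/notes/{noteID}", deleteNote)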
-func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mPOST, pattern, handlerFn) -} - -// Put adds the route `pattern` that matches a PUT http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mPUT, pattern, handlerFn) -} - -// Trace adds the route `pattern` that matches a TRACE http method to -// execute the `handlerFn` http.HandlerFunc. -func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) { - mx.handle(mTRACE, pattern, handlerFn) -} - -// NotFound sets a custom http.HandlerFunc for routing paths that could -// not be found. The default 404 handler is `http.NotFound`. -func (mx *Mux) NotFound(handlerFn http.HandlerFunc) { - // Build NotFound handler chain - m := mx - h := Chain(mx.middlewares...).HandlerFunc(handlerFn).ServeHTTP - if mx.inline && mx.parent != nil { - m = mx.parent - } - - // Update the notFoundHandler from this point forward - m.notFoundHandler = h - m.updateSubRoutes(func(subMux *Mux) { - if subMux.notFoundHandler == nil { - subMux.NotFound(h) - } - }) -} - -// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the -// method is unresolved. The default handler returns a 405 with an empty body. -func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) { - // Build MethodNotAllowed handler chain - m := mx - h := Chain(mx.middlewares...).HandlerFunc(handlerFn).ServeHTTP - if mx.inline && mx.parent != nil { - m = mx.parent - } - - // Update the methodNotAllowedHandler from this point forward - m.methodNotAllowedHandler = h - m.updateSubRoutes(func(subMux *Mux) { - if subMux.methodNotAllowedHandler == nil { - subMux.MethodNotAllowed(h) - } - }) -} - -// With adds inline middlewares for an endpoint handler. -func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router { - // Similarly as in handle(), we must build the mux handler once additional - // middleware registration isn't allowed for this stack, like now. - if !mx.inline && mx.handler == nil { - mx.updateRouteHandler() - } - - // Copy middlewares from parent inline muxs - var mws Middlewares - if mx.inline { - mws = make(Middlewares, len(mx.middlewares)) - copy(mws, mx.middlewares) - } - mws = append(mws, middlewares...) - - im := &Mux{ - pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws, - notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler, - } - - return im -} - -// Group creates a new inline-Mux with a fresh middleware stack. It's useful -// for a group of handlers along the same routing path that use an additional -// set of middlewares. See _examples/. -func (mx *Mux) Group(fn func(r Router)) Router { - im := mx.With().(*Mux) - if fn != nil { - fn(im) - } - return im -} - -// Route creates a new Mux with a fresh middleware stack and mounts it -// along the `pattern` as a subrouter. Effectively, this is a short-hand -// call to Mount. See _examples/. -func (mx *Mux) Route(pattern string, fn func(r Router)) Router { - if fn == nil { - panic(fmt.Sprintf("chi: attempting to Route() a nil subrouter on '%s'", pattern)) - } - subRouter := NewRouter() - fn(subRouter) - mx.Mount(pattern, subRouter) - return subRouter -} - -// Mount attaches another http.Handler or chi Router as a subrouter along a routing -// path. It's very useful to split up a large API as many independent routers and -// compose them as a single service using Mount. See _examples/. 
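// A hedged sketch of composing a service from sub-routers (usersRouter is
// hypothetical); the mounted router sees paths relative to /users:
//
//	r := NewRouter()
//	r.Mount("/users", usersRouter)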
-// -// Note that Mount() simply sets a wildcard along the `pattern` that will continue -// routing at the `handler`, which in most cases is another chi.Router. As a result, -// if you define two Mount() routes on the exact same pattern the mount will panic. -func (mx *Mux) Mount(pattern string, handler http.Handler) { - if handler == nil { - panic(fmt.Sprintf("chi: attempting to Mount() a nil handler on '%s'", pattern)) - } - - // Provide runtime safety for ensuring a pattern isn't mounted on an existing - // routing pattern. - if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") { - panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern)) - } - - // Assign sub-Router's with the parent not found & method not allowed handler if not specified. - subr, ok := handler.(*Mux) - if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil { - subr.NotFound(mx.notFoundHandler) - } - if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil { - subr.MethodNotAllowed(mx.methodNotAllowedHandler) - } - - mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - rctx := RouteContext(r.Context()) - - // shift the url path past the previous subrouter - rctx.RoutePath = mx.nextRoutePath(rctx) - - // reset the wildcard URLParam which connects the subrouter - n := len(rctx.URLParams.Keys) - 1 - if n >= 0 && rctx.URLParams.Keys[n] == "*" && len(rctx.URLParams.Values) > n { - rctx.URLParams.Values[n] = "" - } - - handler.ServeHTTP(w, r) - }) - - if pattern == "" || pattern[len(pattern)-1] != '/' { - mx.handle(mALL|mSTUB, pattern, mountHandler) - mx.handle(mALL|mSTUB, pattern+"/", mountHandler) - pattern += "/" - } - - method := mALL - subroutes, _ := handler.(Routes) - if subroutes != nil { - method |= mSTUB - } - n := mx.handle(method, pattern+"*", mountHandler) - - if subroutes != nil { - n.subroutes = subroutes - } -} - -// Routes returns a slice of routing information from the tree, -// useful for traversing available routes of a router. -func (mx *Mux) Routes() []Route { - return mx.tree.routes() -} - -// Middlewares returns a slice of middleware handler functions. -func (mx *Mux) Middlewares() Middlewares { - return mx.middlewares -} - -// Match searches the routing tree for a handler that matches the method/path. -// It's similar to routing a http request, but without executing the handler -// thereafter. -// -// Note: the *Context state is updated during execution, so manage -// the state carefully or make a NewRouteContext(). -func (mx *Mux) Match(rctx *Context, method, path string) bool { - m, ok := methodMap[method] - if !ok { - return false - } - - node, _, h := mx.tree.FindRoute(rctx, m, path) - - if node != nil && node.subroutes != nil { - rctx.RoutePath = mx.nextRoutePath(rctx) - return node.subroutes.Match(rctx, method, rctx.RoutePath) - } - - return h != nil -} - -// NotFoundHandler returns the default Mux 404 responder whenever a route -// cannot be found. -func (mx *Mux) NotFoundHandler() http.HandlerFunc { - if mx.notFoundHandler != nil { - return mx.notFoundHandler - } - return http.NotFound -} - -// MethodNotAllowedHandler returns the default Mux 405 responder whenever -// a method cannot be resolved for a route. 
-func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc { - if mx.methodNotAllowedHandler != nil { - return mx.methodNotAllowedHandler - } - return methodNotAllowedHandler -} - -// handle registers a http.Handler in the routing tree for a particular http method -// and routing pattern. -func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node { - if len(pattern) == 0 || pattern[0] != '/' { - panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern)) - } - - // Build the computed routing handler for this routing pattern. - if !mx.inline && mx.handler == nil { - mx.updateRouteHandler() - } - - // Build endpoint handler with inline middlewares for the route - var h http.Handler - if mx.inline { - mx.handler = http.HandlerFunc(mx.routeHTTP) - h = Chain(mx.middlewares...).Handler(handler) - } else { - h = handler - } - - // Add the endpoint to the tree and return the node - return mx.tree.InsertRoute(method, pattern, h) -} - -// routeHTTP routes a http.Request through the Mux routing tree to serve -// the matching handler for a particular http method. -func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) { - // Grab the route context object - rctx := r.Context().Value(RouteCtxKey).(*Context) - - // The request routing path - routePath := rctx.RoutePath - if routePath == "" { - if r.URL.RawPath != "" { - routePath = r.URL.RawPath - } else { - routePath = r.URL.Path - } - } - - // Check if method is supported by chi - if rctx.RouteMethod == "" { - rctx.RouteMethod = r.Method - } - method, ok := methodMap[rctx.RouteMethod] - if !ok { - mx.MethodNotAllowedHandler().ServeHTTP(w, r) - return - } - - // Find the route - if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil { - h.ServeHTTP(w, r) - return - } - if rctx.methodNotAllowed { - mx.MethodNotAllowedHandler().ServeHTTP(w, r) - } else { - mx.NotFoundHandler().ServeHTTP(w, r) - } -} - -func (mx *Mux) nextRoutePath(rctx *Context) string { - routePath := "/" - nx := len(rctx.routeParams.Keys) - 1 // index of last param in list - if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx { - routePath = "/" + rctx.routeParams.Values[nx] - } - return routePath -} - -// Recursively update data on child routers. -func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) { - for _, r := range mx.tree.routes() { - subMux, ok := r.SubRoutes.(*Mux) - if !ok { - continue - } - fn(subMux) - } -} - -// updateRouteHandler builds the single mux handler that is a chain of the middleware -// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this -// point, no other middlewares can be registered on this Mux's stack. But you can still -// compose additional middlewares via Group()'s or using a chained middleware handler. -func (mx *Mux) updateRouteHandler() { - mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP)) -} - -// methodNotAllowedHandler is a helper function to respond with a 405, -// method not allowed. -func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(405) - w.Write(nil) -} +package chi + +import ( + "context" + "fmt" + "net/http" + "strings" + "sync" +) + +var _ Router = &Mux{} + +// Mux is a simple HTTP route multiplexer that parses a request path, +// records any URL params, and executes an end handler. It implements +// the http.Handler interface and is friendly with the standard library. 
+// +// Mux is designed to be fast, minimal and offer a powerful API for building +// modular and composable HTTP services with a large set of handlers. It's +// particularly useful for writing large REST API services that break a handler +// into many smaller parts composed of middlewares and end handlers. +type Mux struct { + // The radix trie router + tree *node + + // The middleware stack + middlewares []func(http.Handler) http.Handler + + // Controls the behaviour of middleware chain generation when a mux + // is registered as an inline group inside another mux. + inline bool + parent *Mux + + // The computed mux handler made of the chained middleware stack and + // the tree router + handler http.Handler + + // Routing context pool + pool *sync.Pool + + // Custom route not found handler + notFoundHandler http.HandlerFunc + + // Custom method not allowed handler + methodNotAllowedHandler http.HandlerFunc +} + +// NewMux returns a newly initialized Mux object that implements the Router +// interface. +func NewMux() *Mux { + mux := &Mux{tree: &node{}, pool: &sync.Pool{}} + mux.pool.New = func() interface{} { + return NewRouteContext() + } + return mux +} + +// ServeHTTP is the single method of the http.Handler interface that makes +// Mux interoperable with the standard library. It uses a sync.Pool to get and +// reuse routing contexts for each request. +func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Ensure the mux has some routes defined on the mux + if mx.handler == nil { + mx.NotFoundHandler().ServeHTTP(w, r) + return + } + + // Check if a routing context already exists from a parent router. + rctx, _ := r.Context().Value(RouteCtxKey).(*Context) + if rctx != nil { + mx.handler.ServeHTTP(w, r) + return + } + + // Fetch a RouteContext object from the sync pool, and call the computed + // mx.handler that is comprised of mx.middlewares + mx.routeHTTP. + // Once the request is finished, reset the routing context and put it back + // into the pool for reuse from another request. + rctx = mx.pool.Get().(*Context) + rctx.Reset() + rctx.Routes = mx + rctx.parentCtx = r.Context() + + // NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation + r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx)) + + // Serve the request and once its done, put the request context back in the sync pool + mx.handler.ServeHTTP(w, r) + mx.pool.Put(rctx) +} + +// Use appends a middleware handler to the Mux middleware stack. +// +// The middleware stack for any Mux will execute before searching for a matching +// route to a specific handler, which provides opportunity to respond early, +// change the course of the request execution, or set request-scoped values for +// the next http.Handler. +func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) { + if mx.handler != nil { + panic("chi: all middlewares must be defined before routes on a mux") + } + mx.middlewares = append(mx.middlewares, middlewares...) +} + +// Handle adds the route `pattern` that matches any http method to +// execute the `handler` http.Handler. +func (mx *Mux) Handle(pattern string, handler http.Handler) { + mx.handle(mALL, pattern, handler) +} + +// HandleFunc adds the route `pattern` that matches any http method to +// execute the `handlerFn` http.HandlerFunc. 
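// Hedged registration sketch: Use must precede any route or chi panics, and
// Handle/HandleFunc match every HTTP method on the pattern (requestLogger
// and metricsHandler are hypothetical):
//
//	r := NewRouter()
//	r.Use(requestLogger)                 // middleware first
//	r.Handle("/metrics", metricsHandler) // then routes, any method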
+func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mALL, pattern, handlerFn) +} + +// Method adds the route `pattern` that matches `method` http method to +// execute the `handler` http.Handler. +func (mx *Mux) Method(method, pattern string, handler http.Handler) { + m, ok := methodMap[strings.ToUpper(method)] + if !ok { + panic(fmt.Sprintf("chi: '%s' http method is not supported.", method)) + } + mx.handle(m, pattern, handler) +} + +// MethodFunc adds the route `pattern` that matches `method` http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) { + mx.Method(method, pattern, handlerFn) +} + +// Connect adds the route `pattern` that matches a CONNECT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mCONNECT, pattern, handlerFn) +} + +// Delete adds the route `pattern` that matches a DELETE http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mDELETE, pattern, handlerFn) +} + +// Get adds the route `pattern` that matches a GET http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mGET, pattern, handlerFn) +} + +// Head adds the route `pattern` that matches a HEAD http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mHEAD, pattern, handlerFn) +} + +// Options adds the route `pattern` that matches a OPTIONS http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mOPTIONS, pattern, handlerFn) +} + +// Patch adds the route `pattern` that matches a PATCH http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPATCH, pattern, handlerFn) +} + +// Post adds the route `pattern` that matches a POST http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPOST, pattern, handlerFn) +} + +// Put adds the route `pattern` that matches a PUT http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mPUT, pattern, handlerFn) +} + +// Trace adds the route `pattern` that matches a TRACE http method to +// execute the `handlerFn` http.HandlerFunc. +func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) { + mx.handle(mTRACE, pattern, handlerFn) +} + +// NotFound sets a custom http.HandlerFunc for routing paths that could +// not be found. The default 404 handler is `http.NotFound`. +func (mx *Mux) NotFound(handlerFn http.HandlerFunc) { + // Build NotFound handler chain + m := mx + h := Chain(mx.middlewares...).HandlerFunc(handlerFn).ServeHTTP + if mx.inline && mx.parent != nil { + m = mx.parent + } + + // Update the notFoundHandler from this point forward + m.notFoundHandler = h + m.updateSubRoutes(func(subMux *Mux) { + if subMux.notFoundHandler == nil { + subMux.NotFound(h) + } + }) +} + +// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the +// method is unresolved. The default handler returns a 405 with an empty body. 
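// Hedged example of overriding the default 404/405 responders:
//
//	r.NotFound(func(w http.ResponseWriter, r *http.Request) {
//		http.Error(w, "no such route", http.StatusNotFound)
//	})
//	r.MethodNotAllowed(func(w http.ResponseWriter, r *http.Request) {
//		http.Error(w, "method not allowed here", http.StatusMethodNotAllowed)
//	})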
+func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) { + // Build MethodNotAllowed handler chain + m := mx + h := Chain(mx.middlewares...).HandlerFunc(handlerFn).ServeHTTP + if mx.inline && mx.parent != nil { + m = mx.parent + } + + // Update the methodNotAllowedHandler from this point forward + m.methodNotAllowedHandler = h + m.updateSubRoutes(func(subMux *Mux) { + if subMux.methodNotAllowedHandler == nil { + subMux.MethodNotAllowed(h) + } + }) +} + +// With adds inline middlewares for an endpoint handler. +func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router { + // Similarly as in handle(), we must build the mux handler once additional + // middleware registration isn't allowed for this stack, like now. + if !mx.inline && mx.handler == nil { + mx.updateRouteHandler() + } + + // Copy middlewares from parent inline muxs + var mws Middlewares + if mx.inline { + mws = make(Middlewares, len(mx.middlewares)) + copy(mws, mx.middlewares) + } + mws = append(mws, middlewares...) + + im := &Mux{ + pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws, + notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler, + } + + return im +} + +// Group creates a new inline-Mux with a fresh middleware stack. It's useful +// for a group of handlers along the same routing path that use an additional +// set of middlewares. See _examples/. +func (mx *Mux) Group(fn func(r Router)) Router { + im := mx.With().(*Mux) + if fn != nil { + fn(im) + } + return im +} + +// Route creates a new Mux with a fresh middleware stack and mounts it +// along the `pattern` as a subrouter. Effectively, this is a short-hand +// call to Mount. See _examples/. +func (mx *Mux) Route(pattern string, fn func(r Router)) Router { + if fn == nil { + panic(fmt.Sprintf("chi: attempting to Route() a nil subrouter on '%s'", pattern)) + } + subRouter := NewRouter() + fn(subRouter) + mx.Mount(pattern, subRouter) + return subRouter +} + +// Mount attaches another http.Handler or chi Router as a subrouter along a routing +// path. It's very useful to split up a large API as many independent routers and +// compose them as a single service using Mount. See _examples/. +// +// Note that Mount() simply sets a wildcard along the `pattern` that will continue +// routing at the `handler`, which in most cases is another chi.Router. As a result, +// if you define two Mount() routes on the exact same pattern the mount will panic. +func (mx *Mux) Mount(pattern string, handler http.Handler) { + if handler == nil { + panic(fmt.Sprintf("chi: attempting to Mount() a nil handler on '%s'", pattern)) + } + + // Provide runtime safety for ensuring a pattern isn't mounted on an existing + // routing pattern. + if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") { + panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern)) + } + + // Assign sub-Router's with the parent not found & method not allowed handler if not specified. 
+ subr, ok := handler.(*Mux) + if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil { + subr.NotFound(mx.notFoundHandler) + } + if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil { + subr.MethodNotAllowed(mx.methodNotAllowedHandler) + } + + mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + rctx := RouteContext(r.Context()) + + // shift the url path past the previous subrouter + rctx.RoutePath = mx.nextRoutePath(rctx) + + // reset the wildcard URLParam which connects the subrouter + n := len(rctx.URLParams.Keys) - 1 + if n >= 0 && rctx.URLParams.Keys[n] == "*" && len(rctx.URLParams.Values) > n { + rctx.URLParams.Values[n] = "" + } + + handler.ServeHTTP(w, r) + }) + + if pattern == "" || pattern[len(pattern)-1] != '/' { + mx.handle(mALL|mSTUB, pattern, mountHandler) + mx.handle(mALL|mSTUB, pattern+"/", mountHandler) + pattern += "/" + } + + method := mALL + subroutes, _ := handler.(Routes) + if subroutes != nil { + method |= mSTUB + } + n := mx.handle(method, pattern+"*", mountHandler) + + if subroutes != nil { + n.subroutes = subroutes + } +} + +// Routes returns a slice of routing information from the tree, +// useful for traversing available routes of a router. +func (mx *Mux) Routes() []Route { + return mx.tree.routes() +} + +// Middlewares returns a slice of middleware handler functions. +func (mx *Mux) Middlewares() Middlewares { + return mx.middlewares +} + +// Match searches the routing tree for a handler that matches the method/path. +// It's similar to routing a http request, but without executing the handler +// thereafter. +// +// Note: the *Context state is updated during execution, so manage +// the state carefully or make a NewRouteContext(). +func (mx *Mux) Match(rctx *Context, method, path string) bool { + m, ok := methodMap[method] + if !ok { + return false + } + + node, _, h := mx.tree.FindRoute(rctx, m, path) + + if node != nil && node.subroutes != nil { + rctx.RoutePath = mx.nextRoutePath(rctx) + return node.subroutes.Match(rctx, method, rctx.RoutePath) + } + + return h != nil +} + +// NotFoundHandler returns the default Mux 404 responder whenever a route +// cannot be found. +func (mx *Mux) NotFoundHandler() http.HandlerFunc { + if mx.notFoundHandler != nil { + return mx.notFoundHandler + } + return http.NotFound +} + +// MethodNotAllowedHandler returns the default Mux 405 responder whenever +// a method cannot be resolved for a route. +func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc { + if mx.methodNotAllowedHandler != nil { + return mx.methodNotAllowedHandler + } + return methodNotAllowedHandler +} + +// handle registers a http.Handler in the routing tree for a particular http method +// and routing pattern. +func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node { + if len(pattern) == 0 || pattern[0] != '/' { + panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern)) + } + + // Build the computed routing handler for this routing pattern. 
+ if !mx.inline && mx.handler == nil { + mx.updateRouteHandler() + } + + // Build endpoint handler with inline middlewares for the route + var h http.Handler + if mx.inline { + mx.handler = http.HandlerFunc(mx.routeHTTP) + h = Chain(mx.middlewares...).Handler(handler) + } else { + h = handler + } + + // Add the endpoint to the tree and return the node + return mx.tree.InsertRoute(method, pattern, h) +} + +// routeHTTP routes a http.Request through the Mux routing tree to serve +// the matching handler for a particular http method. +func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) { + // Grab the route context object + rctx := r.Context().Value(RouteCtxKey).(*Context) + + // The request routing path + routePath := rctx.RoutePath + if routePath == "" { + if r.URL.RawPath != "" { + routePath = r.URL.RawPath + } else { + routePath = r.URL.Path + } + } + + // Check if method is supported by chi + if rctx.RouteMethod == "" { + rctx.RouteMethod = r.Method + } + method, ok := methodMap[rctx.RouteMethod] + if !ok { + mx.MethodNotAllowedHandler().ServeHTTP(w, r) + return + } + + // Find the route + if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil { + h.ServeHTTP(w, r) + return + } + if rctx.methodNotAllowed { + mx.MethodNotAllowedHandler().ServeHTTP(w, r) + } else { + mx.NotFoundHandler().ServeHTTP(w, r) + } +} + +func (mx *Mux) nextRoutePath(rctx *Context) string { + routePath := "/" + nx := len(rctx.routeParams.Keys) - 1 // index of last param in list + if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx { + routePath = "/" + rctx.routeParams.Values[nx] + } + return routePath +} + +// Recursively update data on child routers. +func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) { + for _, r := range mx.tree.routes() { + subMux, ok := r.SubRoutes.(*Mux) + if !ok { + continue + } + fn(subMux) + } +} + +// updateRouteHandler builds the single mux handler that is a chain of the middleware +// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this +// point, no other middlewares can be registered on this Mux's stack. But you can still +// compose additional middlewares via Group()'s or using a chained middleware handler. +func (mx *Mux) updateRouteHandler() { + mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP)) +} + +// methodNotAllowedHandler is a helper function to respond with a 405, +// method not allowed. +func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(405) + w.Write(nil) +} diff --git a/vendor/github.com/go-chi/chi/tree.go b/vendor/github.com/go-chi/chi/tree.go index 8057c52813..41954970a0 100644 --- a/vendor/github.com/go-chi/chi/tree.go +++ b/vendor/github.com/go-chi/chi/tree.go @@ -1,866 +1,866 @@ -package chi - -// Radix tree implementation below is a based on the original work by -// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go -// (MIT licensed). It's been heavily modified for use as a HTTP routing tree. 
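// Conceptual note (added for illustration): the trie below stores routes by
// splitting a pattern such as "/users/{id}/posts" into a static node
// "/users/", a param node "{id}" and a static node "/posts", so matching a
// request walks shared prefixes instead of scanning every registered route.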
- -import ( - "fmt" - "net/http" - "regexp" - "sort" - "strconv" - "strings" -) - -type methodTyp int - -const ( - mSTUB methodTyp = 1 << iota - mCONNECT - mDELETE - mGET - mHEAD - mOPTIONS - mPATCH - mPOST - mPUT - mTRACE -) - -var mALL = mCONNECT | mDELETE | mGET | mHEAD | - mOPTIONS | mPATCH | mPOST | mPUT | mTRACE - -var methodMap = map[string]methodTyp{ - http.MethodConnect: mCONNECT, - http.MethodDelete: mDELETE, - http.MethodGet: mGET, - http.MethodHead: mHEAD, - http.MethodOptions: mOPTIONS, - http.MethodPatch: mPATCH, - http.MethodPost: mPOST, - http.MethodPut: mPUT, - http.MethodTrace: mTRACE, -} - -// RegisterMethod adds support for custom HTTP method handlers, available -// via Router#Method and Router#MethodFunc -func RegisterMethod(method string) { - if method == "" { - return - } - method = strings.ToUpper(method) - if _, ok := methodMap[method]; ok { - return - } - n := len(methodMap) - if n > strconv.IntSize-2 { - panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize)) - } - mt := methodTyp(2 << n) - methodMap[method] = mt - mALL |= mt -} - -type nodeTyp uint8 - -const ( - ntStatic nodeTyp = iota // /home - ntRegexp // /{id:[0-9]+} - ntParam // /{user} - ntCatchAll // /api/v1/* -) - -type node struct { - // node type: static, regexp, param, catchAll - typ nodeTyp - - // first byte of the prefix - label byte - - // first byte of the child prefix - tail byte - - // prefix is the common prefix we ignore - prefix string - - // regexp matcher for regexp nodes - rex *regexp.Regexp - - // HTTP handler endpoints on the leaf node - endpoints endpoints - - // subroutes on the leaf node - subroutes Routes - - // child nodes should be stored in-order for iteration, - // in groups of the node type. - children [ntCatchAll + 1]nodes -} - -// endpoints is a mapping of http method constants to handlers -// for a given route. -type endpoints map[methodTyp]*endpoint - -type endpoint struct { - // endpoint handler - handler http.Handler - - // pattern is the routing pattern for handler nodes - pattern string - - // parameter keys recorded on handler nodes - paramKeys []string -} - -func (s endpoints) Value(method methodTyp) *endpoint { - mh, ok := s[method] - if !ok { - mh = &endpoint{} - s[method] = mh - } - return mh -} - -func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node { - var parent *node - search := pattern - - for { - // Handle key exhaustion - if len(search) == 0 { - // Insert or update the node's leaf handler - n.setEndpoint(method, handler, pattern) - return n - } - - // We're going to be searching for a wild node next, - // in this case, we need to get the tail - var label = search[0] - var segTail byte - var segEndIdx int - var segTyp nodeTyp - var segRexpat string - if label == '{' || label == '*' { - segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search) - } - - var prefix string - if segTyp == ntRegexp { - prefix = segRexpat - } - - // Look for the edge to attach to - parent = n - n = n.getEdge(segTyp, label, segTail, prefix) - - // No edge, create one - if n == nil { - child := &node{label: label, tail: segTail, prefix: search} - hn := parent.addChild(child, search) - hn.setEndpoint(method, handler, pattern) - - return hn - } - - // Found an edge to match the pattern - - if n.typ > ntStatic { - // We found a param node, trim the param from the search path and continue. - // This param/wild pattern segment would already be on the tree from a previous - // call to addChild when creating a new node. 
- search = search[segEndIdx:] - continue - } - - // Static nodes fall below here. - // Determine longest prefix of the search key on match. - commonPrefix := longestPrefix(search, n.prefix) - if commonPrefix == len(n.prefix) { - // the common prefix is as long as the current node's prefix we're attempting to insert. - // keep the search going. - search = search[commonPrefix:] - continue - } - - // Split the node - child := &node{ - typ: ntStatic, - prefix: search[:commonPrefix], - } - parent.replaceChild(search[0], segTail, child) - - // Restore the existing node - n.label = n.prefix[commonPrefix] - n.prefix = n.prefix[commonPrefix:] - child.addChild(n, n.prefix) - - // If the new key is a subset, set the method/handler on this node and finish. - search = search[commonPrefix:] - if len(search) == 0 { - child.setEndpoint(method, handler, pattern) - return child - } - - // Create a new edge for the node - subchild := &node{ - typ: ntStatic, - label: search[0], - prefix: search, - } - hn := child.addChild(subchild, search) - hn.setEndpoint(method, handler, pattern) - return hn - } -} - -// addChild appends the new `child` node to the tree using the `pattern` as the trie key. -// For a URL router like chi's, we split the static, param, regexp and wildcard segments -// into different nodes. In addition, addChild will recursively call itself until every -// pattern segment is added to the url pattern tree as individual nodes, depending on type. -func (n *node) addChild(child *node, prefix string) *node { - search := prefix - - // handler leaf node added to the tree is the child. - // this may be overridden later down the flow - hn := child - - // Parse next segment - segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search) - - // Add child depending on next up segment - switch segTyp { - - case ntStatic: - // Search prefix is all static (that is, has no params in path) - // noop - - default: - // Search prefix contains a param, regexp or wildcard - - if segTyp == ntRegexp { - rex, err := regexp.Compile(segRexpat) - if err != nil { - panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat)) - } - child.prefix = segRexpat - child.rex = rex - } - - if segStartIdx == 0 { - // Route starts with a param - child.typ = segTyp - - if segTyp == ntCatchAll { - segStartIdx = -1 - } else { - segStartIdx = segEndIdx - } - if segStartIdx < 0 { - segStartIdx = len(search) - } - child.tail = segTail // for params, we set the tail - - if segStartIdx != len(search) { - // add static edge for the remaining part, split the end. - // its not possible to have adjacent param nodes, so its certainly - // going to be a static node next. 
- - search = search[segStartIdx:] // advance search position - - nn := &node{ - typ: ntStatic, - label: search[0], - prefix: search, - } - hn = child.addChild(nn, search) - } - - } else if segStartIdx > 0 { - // Route has some param - - // starts with a static segment - child.typ = ntStatic - child.prefix = search[:segStartIdx] - child.rex = nil - - // add the param edge node - search = search[segStartIdx:] - - nn := &node{ - typ: segTyp, - label: search[0], - tail: segTail, - } - hn = child.addChild(nn, search) - - } - } - - n.children[child.typ] = append(n.children[child.typ], child) - n.children[child.typ].Sort() - return hn -} - -func (n *node) replaceChild(label, tail byte, child *node) { - for i := 0; i < len(n.children[child.typ]); i++ { - if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail { - n.children[child.typ][i] = child - n.children[child.typ][i].label = label - n.children[child.typ][i].tail = tail - return - } - } - panic("chi: replacing missing child") -} - -func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node { - nds := n.children[ntyp] - for i := 0; i < len(nds); i++ { - if nds[i].label == label && nds[i].tail == tail { - if ntyp == ntRegexp && nds[i].prefix != prefix { - continue - } - return nds[i] - } - } - return nil -} - -func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) { - // Set the handler for the method type on the node - if n.endpoints == nil { - n.endpoints = make(endpoints) - } - - paramKeys := patParamKeys(pattern) - - if method&mSTUB == mSTUB { - n.endpoints.Value(mSTUB).handler = handler - } - if method&mALL == mALL { - h := n.endpoints.Value(mALL) - h.handler = handler - h.pattern = pattern - h.paramKeys = paramKeys - for _, m := range methodMap { - h := n.endpoints.Value(m) - h.handler = handler - h.pattern = pattern - h.paramKeys = paramKeys - } - } else { - h := n.endpoints.Value(method) - h.handler = handler - h.pattern = pattern - h.paramKeys = paramKeys - } -} - -func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) { - // Reset the context routing pattern and params - rctx.routePattern = "" - rctx.routeParams.Keys = rctx.routeParams.Keys[:0] - rctx.routeParams.Values = rctx.routeParams.Values[:0] - - // Find the routing handlers for the path - rn := n.findRoute(rctx, method, path) - if rn == nil { - return nil, nil, nil - } - - // Record the routing params in the request lifecycle - rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...) - rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...) - - // Record the routing pattern in the request lifecycle - if rn.endpoints[method].pattern != "" { - rctx.routePattern = rn.endpoints[method].pattern - rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern) - } - - return rn, rn.endpoints, rn.endpoints[method].handler -} - -// Recursive edge traversal by checking all nodeTyp groups along the way. -// It's like searching through a multi-dimensional radix trie. 
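// For example (hedged, using the pattern syntax documented in chi.go):
// matching GET "/date/2017/04/01" against
// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" first follows the
// static "/date/" edge, then three regexp edges, capturing yyyy, mm and dd
// as URL params.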
-func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node { - nn := n - search := path - - for t, nds := range nn.children { - ntyp := nodeTyp(t) - if len(nds) == 0 { - continue - } - - var xn *node - xsearch := search - - var label byte - if search != "" { - label = search[0] - } - - switch ntyp { - case ntStatic: - xn = nds.findEdge(label) - if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) { - continue - } - xsearch = xsearch[len(xn.prefix):] - - case ntParam, ntRegexp: - // short-circuit and return no matching route for empty param values - if xsearch == "" { - continue - } - - // serially loop through each node grouped by the tail delimiter - for idx := 0; idx < len(nds); idx++ { - xn = nds[idx] - - // label for param nodes is the delimiter byte - p := strings.IndexByte(xsearch, xn.tail) - - if p < 0 { - if xn.tail == '/' { - p = len(xsearch) - } else { - continue - } - } else if ntyp == ntRegexp && p == 0 { - continue - } - - if ntyp == ntRegexp && xn.rex != nil { - if !xn.rex.MatchString(xsearch[:p]) { - continue - } - } else if strings.IndexByte(xsearch[:p], '/') != -1 { - // avoid a match across path segments - continue - } - - prevlen := len(rctx.routeParams.Values) - rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p]) - xsearch = xsearch[p:] - - if len(xsearch) == 0 { - if xn.isLeaf() { - h := xn.endpoints[method] - if h != nil && h.handler != nil { - rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...) - return xn - } - - // flag that the routing context found a route, but not a corresponding - // supported method - rctx.methodNotAllowed = true - } - } - - // recursively find the next node on this branch - fin := xn.findRoute(rctx, method, xsearch) - if fin != nil { - return fin - } - - // not found on this branch, reset vars - rctx.routeParams.Values = rctx.routeParams.Values[:prevlen] - xsearch = search - } - - rctx.routeParams.Values = append(rctx.routeParams.Values, "") - - default: - // catch-all nodes - rctx.routeParams.Values = append(rctx.routeParams.Values, search) - xn = nds[0] - xsearch = "" - } - - if xn == nil { - continue - } - - // did we find it yet? - if len(xsearch) == 0 { - if xn.isLeaf() { - h := xn.endpoints[method] - if h != nil && h.handler != nil { - rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...) - return xn - } - - // flag that the routing context found a route, but not a corresponding - // supported method - rctx.methodNotAllowed = true - } - } - - // recursively find the next node.. 
- fin := xn.findRoute(rctx, method, xsearch) - if fin != nil { - return fin - } - - // Did not find final handler, let's remove the param here if it was set - if xn.typ > ntStatic { - if len(rctx.routeParams.Values) > 0 { - rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1] - } - } - - } - - return nil -} - -func (n *node) findEdge(ntyp nodeTyp, label byte) *node { - nds := n.children[ntyp] - num := len(nds) - idx := 0 - - switch ntyp { - case ntStatic, ntParam, ntRegexp: - i, j := 0, num-1 - for i <= j { - idx = i + (j-i)/2 - if label > nds[idx].label { - i = idx + 1 - } else if label < nds[idx].label { - j = idx - 1 - } else { - i = num // breaks cond - } - } - if nds[idx].label != label { - return nil - } - return nds[idx] - - default: // catch all - return nds[idx] - } -} - -func (n *node) isLeaf() bool { - return n.endpoints != nil -} - -func (n *node) findPattern(pattern string) bool { - nn := n - for _, nds := range nn.children { - if len(nds) == 0 { - continue - } - - n = nn.findEdge(nds[0].typ, pattern[0]) - if n == nil { - continue - } - - var idx int - var xpattern string - - switch n.typ { - case ntStatic: - idx = longestPrefix(pattern, n.prefix) - if idx < len(n.prefix) { - continue - } - - case ntParam, ntRegexp: - idx = strings.IndexByte(pattern, '}') + 1 - - case ntCatchAll: - idx = longestPrefix(pattern, "*") - - default: - panic("chi: unknown node type") - } - - xpattern = pattern[idx:] - if len(xpattern) == 0 { - return true - } - - return n.findPattern(xpattern) - } - return false -} - -func (n *node) routes() []Route { - rts := []Route{} - - n.walk(func(eps endpoints, subroutes Routes) bool { - if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil { - return false - } - - // Group methodHandlers by unique patterns - pats := make(map[string]endpoints) - - for mt, h := range eps { - if h.pattern == "" { - continue - } - p, ok := pats[h.pattern] - if !ok { - p = endpoints{} - pats[h.pattern] = p - } - p[mt] = h - } - - for p, mh := range pats { - hs := make(map[string]http.Handler) - if mh[mALL] != nil && mh[mALL].handler != nil { - hs["*"] = mh[mALL].handler - } - - for mt, h := range mh { - if h.handler == nil { - continue - } - m := methodTypString(mt) - if m == "" { - continue - } - hs[m] = h.handler - } - - rt := Route{p, hs, subroutes} - rts = append(rts, rt) - } - - return false - }) - - return rts -} - -func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool { - // Visit the leaf values if any - if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) { - return true - } - - // Recurse on the children - for _, ns := range n.children { - for _, cn := range ns { - if cn.walk(fn) { - return true - } - } - } - return false -} - -// patNextSegment returns the next segment details from a pattern: -// node type, param key, regexp string, param tail byte, param starting index, param ending index -func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) { - ps := strings.Index(pattern, "{") - ws := strings.Index(pattern, "*") - - if ps < 0 && ws < 0 { - return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing - } - - // Sanity check - if ps >= 0 && ws >= 0 && ws < ps { - panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'") - } - - var tail byte = '/' // Default endpoint tail to / byte - - if ps >= 0 { - // Param/Regexp pattern is next - nt := ntParam - - // Read to closing } taking into account opens and closes in 
curl count (cc) - cc := 0 - pe := ps - for i, c := range pattern[ps:] { - if c == '{' { - cc++ - } else if c == '}' { - cc-- - if cc == 0 { - pe = ps + i - break - } - } - } - if pe == ps { - panic("chi: route param closing delimiter '}' is missing") - } - - key := pattern[ps+1 : pe] - pe++ // set end to next position - - if pe < len(pattern) { - tail = pattern[pe] - } - - var rexpat string - if idx := strings.Index(key, ":"); idx >= 0 { - nt = ntRegexp - rexpat = key[idx+1:] - key = key[:idx] - } - - if len(rexpat) > 0 { - if rexpat[0] != '^' { - rexpat = "^" + rexpat - } - if rexpat[len(rexpat)-1] != '$' { - rexpat += "$" - } - } - - return nt, key, rexpat, tail, ps, pe - } - - // Wildcard pattern as finale - if ws < len(pattern)-1 { - panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead") - } - return ntCatchAll, "*", "", 0, ws, len(pattern) -} - -func patParamKeys(pattern string) []string { - pat := pattern - paramKeys := []string{} - for { - ptyp, paramKey, _, _, _, e := patNextSegment(pat) - if ptyp == ntStatic { - return paramKeys - } - for i := 0; i < len(paramKeys); i++ { - if paramKeys[i] == paramKey { - panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey)) - } - } - paramKeys = append(paramKeys, paramKey) - pat = pat[e:] - } -} - -// longestPrefix finds the length of the shared prefix -// of two strings -func longestPrefix(k1, k2 string) int { - max := len(k1) - if l := len(k2); l < max { - max = l - } - var i int - for i = 0; i < max; i++ { - if k1[i] != k2[i] { - break - } - } - return i -} - -func methodTypString(method methodTyp) string { - for s, t := range methodMap { - if method == t { - return s - } - } - return "" -} - -type nodes []*node - -// Sort the list of nodes by label -func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() } -func (ns nodes) Len() int { return len(ns) } -func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } -func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label } - -// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes. -// The list order determines the traversal order. -func (ns nodes) tailSort() { - for i := len(ns) - 1; i >= 0; i-- { - if ns[i].typ > ntStatic && ns[i].tail == '/' { - ns.Swap(i, len(ns)-1) - return - } - } -} - -func (ns nodes) findEdge(label byte) *node { - num := len(ns) - idx := 0 - i, j := 0, num-1 - for i <= j { - idx = i + (j-i)/2 - if label > ns[idx].label { - i = idx + 1 - } else if label < ns[idx].label { - j = idx - 1 - } else { - i = num // breaks cond - } - } - if ns[idx].label != label { - return nil - } - return ns[idx] -} - -// Route describes the details of a routing handler. -// Handlers map key is an HTTP method -type Route struct { - Pattern string - Handlers map[string]http.Handler - SubRoutes Routes -} - -// WalkFunc is the type of the function called for each method and route visited by Walk. -type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error - -// Walk walks any router tree that implements Routes interface. -func Walk(r Routes, walkFn WalkFunc) error { - return walk(r, walkFn, "") -} - -func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error { - for _, route := range r.Routes() { - mws := make([]func(http.Handler) http.Handler, len(parentMw)) - copy(mws, parentMw) - mws = append(mws, r.Middlewares()...) 
- - if route.SubRoutes != nil { - if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil { - return err - } - continue - } - - for method, handler := range route.Handlers { - if method == "*" { - // Ignore a "catchAll" method, since we pass down all the specific methods for each route. - continue - } - - fullRoute := parentRoute + route.Pattern - fullRoute = strings.Replace(fullRoute, "/*/", "/", -1) - - if chain, ok := handler.(*ChainHandler); ok { - if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil { - return err - } - } else { - if err := walkFn(method, fullRoute, handler, mws...); err != nil { - return err - } - } - } - } - - return nil -} +package chi + +// Radix tree implementation below is a based on the original work by +// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go +// (MIT licensed). It's been heavily modified for use as a HTTP routing tree. + +import ( + "fmt" + "net/http" + "regexp" + "sort" + "strconv" + "strings" +) + +type methodTyp int + +const ( + mSTUB methodTyp = 1 << iota + mCONNECT + mDELETE + mGET + mHEAD + mOPTIONS + mPATCH + mPOST + mPUT + mTRACE +) + +var mALL = mCONNECT | mDELETE | mGET | mHEAD | + mOPTIONS | mPATCH | mPOST | mPUT | mTRACE + +var methodMap = map[string]methodTyp{ + http.MethodConnect: mCONNECT, + http.MethodDelete: mDELETE, + http.MethodGet: mGET, + http.MethodHead: mHEAD, + http.MethodOptions: mOPTIONS, + http.MethodPatch: mPATCH, + http.MethodPost: mPOST, + http.MethodPut: mPUT, + http.MethodTrace: mTRACE, +} + +// RegisterMethod adds support for custom HTTP method handlers, available +// via Router#Method and Router#MethodFunc +func RegisterMethod(method string) { + if method == "" { + return + } + method = strings.ToUpper(method) + if _, ok := methodMap[method]; ok { + return + } + n := len(methodMap) + if n > strconv.IntSize-2 { + panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize)) + } + mt := methodTyp(2 << n) + methodMap[method] = mt + mALL |= mt +} + +type nodeTyp uint8 + +const ( + ntStatic nodeTyp = iota // /home + ntRegexp // /{id:[0-9]+} + ntParam // /{user} + ntCatchAll // /api/v1/* +) + +type node struct { + // node type: static, regexp, param, catchAll + typ nodeTyp + + // first byte of the prefix + label byte + + // first byte of the child prefix + tail byte + + // prefix is the common prefix we ignore + prefix string + + // regexp matcher for regexp nodes + rex *regexp.Regexp + + // HTTP handler endpoints on the leaf node + endpoints endpoints + + // subroutes on the leaf node + subroutes Routes + + // child nodes should be stored in-order for iteration, + // in groups of the node type. + children [ntCatchAll + 1]nodes +} + +// endpoints is a mapping of http method constants to handlers +// for a given route. 
+type endpoints map[methodTyp]*endpoint + +type endpoint struct { + // endpoint handler + handler http.Handler + + // pattern is the routing pattern for handler nodes + pattern string + + // parameter keys recorded on handler nodes + paramKeys []string +} + +func (s endpoints) Value(method methodTyp) *endpoint { + mh, ok := s[method] + if !ok { + mh = &endpoint{} + s[method] = mh + } + return mh +} + +func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node { + var parent *node + search := pattern + + for { + // Handle key exhaustion + if len(search) == 0 { + // Insert or update the node's leaf handler + n.setEndpoint(method, handler, pattern) + return n + } + + // We're going to be searching for a wild node next, + // in this case, we need to get the tail + var label = search[0] + var segTail byte + var segEndIdx int + var segTyp nodeTyp + var segRexpat string + if label == '{' || label == '*' { + segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search) + } + + var prefix string + if segTyp == ntRegexp { + prefix = segRexpat + } + + // Look for the edge to attach to + parent = n + n = n.getEdge(segTyp, label, segTail, prefix) + + // No edge, create one + if n == nil { + child := &node{label: label, tail: segTail, prefix: search} + hn := parent.addChild(child, search) + hn.setEndpoint(method, handler, pattern) + + return hn + } + + // Found an edge to match the pattern + + if n.typ > ntStatic { + // We found a param node, trim the param from the search path and continue. + // This param/wild pattern segment would already be on the tree from a previous + // call to addChild when creating a new node. + search = search[segEndIdx:] + continue + } + + // Static nodes fall below here. + // Determine longest prefix of the search key on match. + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + // the common prefix is as long as the current node's prefix we're attempting to insert. + // keep the search going. + search = search[commonPrefix:] + continue + } + + // Split the node + child := &node{ + typ: ntStatic, + prefix: search[:commonPrefix], + } + parent.replaceChild(search[0], segTail, child) + + // Restore the existing node + n.label = n.prefix[commonPrefix] + n.prefix = n.prefix[commonPrefix:] + child.addChild(n, n.prefix) + + // If the new key is a subset, set the method/handler on this node and finish. + search = search[commonPrefix:] + if len(search) == 0 { + child.setEndpoint(method, handler, pattern) + return child + } + + // Create a new edge for the node + subchild := &node{ + typ: ntStatic, + label: search[0], + prefix: search, + } + hn := child.addChild(subchild, search) + hn.setEndpoint(method, handler, pattern) + return hn + } +} + +// addChild appends the new `child` node to the tree using the `pattern` as the trie key. +// For a URL router like chi's, we split the static, param, regexp and wildcard segments +// into different nodes. In addition, addChild will recursively call itself until every +// pattern segment is added to the url pattern tree as individual nodes, depending on type. +func (n *node) addChild(child *node, prefix string) *node { + search := prefix + + // handler leaf node added to the tree is the child. 
+ // this may be overridden later down the flow + hn := child + + // Parse next segment + segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search) + + // Add child depending on next up segment + switch segTyp { + + case ntStatic: + // Search prefix is all static (that is, has no params in path) + // noop + + default: + // Search prefix contains a param, regexp or wildcard + + if segTyp == ntRegexp { + rex, err := regexp.Compile(segRexpat) + if err != nil { + panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat)) + } + child.prefix = segRexpat + child.rex = rex + } + + if segStartIdx == 0 { + // Route starts with a param + child.typ = segTyp + + if segTyp == ntCatchAll { + segStartIdx = -1 + } else { + segStartIdx = segEndIdx + } + if segStartIdx < 0 { + segStartIdx = len(search) + } + child.tail = segTail // for params, we set the tail + + if segStartIdx != len(search) { + // add static edge for the remaining part, split the end. + // its not possible to have adjacent param nodes, so its certainly + // going to be a static node next. + + search = search[segStartIdx:] // advance search position + + nn := &node{ + typ: ntStatic, + label: search[0], + prefix: search, + } + hn = child.addChild(nn, search) + } + + } else if segStartIdx > 0 { + // Route has some param + + // starts with a static segment + child.typ = ntStatic + child.prefix = search[:segStartIdx] + child.rex = nil + + // add the param edge node + search = search[segStartIdx:] + + nn := &node{ + typ: segTyp, + label: search[0], + tail: segTail, + } + hn = child.addChild(nn, search) + + } + } + + n.children[child.typ] = append(n.children[child.typ], child) + n.children[child.typ].Sort() + return hn +} + +func (n *node) replaceChild(label, tail byte, child *node) { + for i := 0; i < len(n.children[child.typ]); i++ { + if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail { + n.children[child.typ][i] = child + n.children[child.typ][i].label = label + n.children[child.typ][i].tail = tail + return + } + } + panic("chi: replacing missing child") +} + +func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node { + nds := n.children[ntyp] + for i := 0; i < len(nds); i++ { + if nds[i].label == label && nds[i].tail == tail { + if ntyp == ntRegexp && nds[i].prefix != prefix { + continue + } + return nds[i] + } + } + return nil +} + +func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) { + // Set the handler for the method type on the node + if n.endpoints == nil { + n.endpoints = make(endpoints) + } + + paramKeys := patParamKeys(pattern) + + if method&mSTUB == mSTUB { + n.endpoints.Value(mSTUB).handler = handler + } + if method&mALL == mALL { + h := n.endpoints.Value(mALL) + h.handler = handler + h.pattern = pattern + h.paramKeys = paramKeys + for _, m := range methodMap { + h := n.endpoints.Value(m) + h.handler = handler + h.pattern = pattern + h.paramKeys = paramKeys + } + } else { + h := n.endpoints.Value(method) + h.handler = handler + h.pattern = pattern + h.paramKeys = paramKeys + } +} + +func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) { + // Reset the context routing pattern and params + rctx.routePattern = "" + rctx.routeParams.Keys = rctx.routeParams.Keys[:0] + rctx.routeParams.Values = rctx.routeParams.Values[:0] + + // Find the routing handlers for the path + rn := n.findRoute(rctx, method, path) + if rn == nil { + return nil, nil, nil + } + + // 
Record the routing params in the request lifecycle + rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...) + rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...) + + // Record the routing pattern in the request lifecycle + if rn.endpoints[method].pattern != "" { + rctx.routePattern = rn.endpoints[method].pattern + rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern) + } + + return rn, rn.endpoints, rn.endpoints[method].handler +} + +// Recursive edge traversal by checking all nodeTyp groups along the way. +// It's like searching through a multi-dimensional radix trie. +func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node { + nn := n + search := path + + for t, nds := range nn.children { + ntyp := nodeTyp(t) + if len(nds) == 0 { + continue + } + + var xn *node + xsearch := search + + var label byte + if search != "" { + label = search[0] + } + + switch ntyp { + case ntStatic: + xn = nds.findEdge(label) + if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) { + continue + } + xsearch = xsearch[len(xn.prefix):] + + case ntParam, ntRegexp: + // short-circuit and return no matching route for empty param values + if xsearch == "" { + continue + } + + // serially loop through each node grouped by the tail delimiter + for idx := 0; idx < len(nds); idx++ { + xn = nds[idx] + + // label for param nodes is the delimiter byte + p := strings.IndexByte(xsearch, xn.tail) + + if p < 0 { + if xn.tail == '/' { + p = len(xsearch) + } else { + continue + } + } else if ntyp == ntRegexp && p == 0 { + continue + } + + if ntyp == ntRegexp && xn.rex != nil { + if !xn.rex.MatchString(xsearch[:p]) { + continue + } + } else if strings.IndexByte(xsearch[:p], '/') != -1 { + // avoid a match across path segments + continue + } + + prevlen := len(rctx.routeParams.Values) + rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p]) + xsearch = xsearch[p:] + + if len(xsearch) == 0 { + if xn.isLeaf() { + h := xn.endpoints[method] + if h != nil && h.handler != nil { + rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...) + return xn + } + + // flag that the routing context found a route, but not a corresponding + // supported method + rctx.methodNotAllowed = true + } + } + + // recursively find the next node on this branch + fin := xn.findRoute(rctx, method, xsearch) + if fin != nil { + return fin + } + + // not found on this branch, reset vars + rctx.routeParams.Values = rctx.routeParams.Values[:prevlen] + xsearch = search + } + + rctx.routeParams.Values = append(rctx.routeParams.Values, "") + + default: + // catch-all nodes + rctx.routeParams.Values = append(rctx.routeParams.Values, search) + xn = nds[0] + xsearch = "" + } + + if xn == nil { + continue + } + + // did we find it yet? + if len(xsearch) == 0 { + if xn.isLeaf() { + h := xn.endpoints[method] + if h != nil && h.handler != nil { + rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...) + return xn + } + + // flag that the routing context found a route, but not a corresponding + // supported method + rctx.methodNotAllowed = true + } + } + + // recursively find the next node.. 
+ fin := xn.findRoute(rctx, method, xsearch) + if fin != nil { + return fin + } + + // Did not find final handler, let's remove the param here if it was set + if xn.typ > ntStatic { + if len(rctx.routeParams.Values) > 0 { + rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1] + } + } + + } + + return nil +} + +func (n *node) findEdge(ntyp nodeTyp, label byte) *node { + nds := n.children[ntyp] + num := len(nds) + idx := 0 + + switch ntyp { + case ntStatic, ntParam, ntRegexp: + i, j := 0, num-1 + for i <= j { + idx = i + (j-i)/2 + if label > nds[idx].label { + i = idx + 1 + } else if label < nds[idx].label { + j = idx - 1 + } else { + i = num // breaks cond + } + } + if nds[idx].label != label { + return nil + } + return nds[idx] + + default: // catch all + return nds[idx] + } +} + +func (n *node) isLeaf() bool { + return n.endpoints != nil +} + +func (n *node) findPattern(pattern string) bool { + nn := n + for _, nds := range nn.children { + if len(nds) == 0 { + continue + } + + n = nn.findEdge(nds[0].typ, pattern[0]) + if n == nil { + continue + } + + var idx int + var xpattern string + + switch n.typ { + case ntStatic: + idx = longestPrefix(pattern, n.prefix) + if idx < len(n.prefix) { + continue + } + + case ntParam, ntRegexp: + idx = strings.IndexByte(pattern, '}') + 1 + + case ntCatchAll: + idx = longestPrefix(pattern, "*") + + default: + panic("chi: unknown node type") + } + + xpattern = pattern[idx:] + if len(xpattern) == 0 { + return true + } + + return n.findPattern(xpattern) + } + return false +} + +func (n *node) routes() []Route { + rts := []Route{} + + n.walk(func(eps endpoints, subroutes Routes) bool { + if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil { + return false + } + + // Group methodHandlers by unique patterns + pats := make(map[string]endpoints) + + for mt, h := range eps { + if h.pattern == "" { + continue + } + p, ok := pats[h.pattern] + if !ok { + p = endpoints{} + pats[h.pattern] = p + } + p[mt] = h + } + + for p, mh := range pats { + hs := make(map[string]http.Handler) + if mh[mALL] != nil && mh[mALL].handler != nil { + hs["*"] = mh[mALL].handler + } + + for mt, h := range mh { + if h.handler == nil { + continue + } + m := methodTypString(mt) + if m == "" { + continue + } + hs[m] = h.handler + } + + rt := Route{p, hs, subroutes} + rts = append(rts, rt) + } + + return false + }) + + return rts +} + +func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool { + // Visit the leaf values if any + if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) { + return true + } + + // Recurse on the children + for _, ns := range n.children { + for _, cn := range ns { + if cn.walk(fn) { + return true + } + } + } + return false +} + +// patNextSegment returns the next segment details from a pattern: +// node type, param key, regexp string, param tail byte, param starting index, param ending index +func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) { + ps := strings.Index(pattern, "{") + ws := strings.Index(pattern, "*") + + if ps < 0 && ws < 0 { + return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing + } + + // Sanity check + if ps >= 0 && ws >= 0 && ws < ps { + panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'") + } + + var tail byte = '/' // Default endpoint tail to / byte + + if ps >= 0 { + // Param/Regexp pattern is next + nt := ntParam + + // Read to closing } taking into account opens and closes in 
curl count (cc) + cc := 0 + pe := ps + for i, c := range pattern[ps:] { + if c == '{' { + cc++ + } else if c == '}' { + cc-- + if cc == 0 { + pe = ps + i + break + } + } + } + if pe == ps { + panic("chi: route param closing delimiter '}' is missing") + } + + key := pattern[ps+1 : pe] + pe++ // set end to next position + + if pe < len(pattern) { + tail = pattern[pe] + } + + var rexpat string + if idx := strings.Index(key, ":"); idx >= 0 { + nt = ntRegexp + rexpat = key[idx+1:] + key = key[:idx] + } + + if len(rexpat) > 0 { + if rexpat[0] != '^' { + rexpat = "^" + rexpat + } + if rexpat[len(rexpat)-1] != '$' { + rexpat += "$" + } + } + + return nt, key, rexpat, tail, ps, pe + } + + // Wildcard pattern as finale + if ws < len(pattern)-1 { + panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead") + } + return ntCatchAll, "*", "", 0, ws, len(pattern) +} + +func patParamKeys(pattern string) []string { + pat := pattern + paramKeys := []string{} + for { + ptyp, paramKey, _, _, _, e := patNextSegment(pat) + if ptyp == ntStatic { + return paramKeys + } + for i := 0; i < len(paramKeys); i++ { + if paramKeys[i] == paramKey { + panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey)) + } + } + paramKeys = append(paramKeys, paramKey) + pat = pat[e:] + } +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +func methodTypString(method methodTyp) string { + for s, t := range methodMap { + if method == t { + return s + } + } + return "" +} + +type nodes []*node + +// Sort the list of nodes by label +func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() } +func (ns nodes) Len() int { return len(ns) } +func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } +func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label } + +// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes. +// The list order determines the traversal order. +func (ns nodes) tailSort() { + for i := len(ns) - 1; i >= 0; i-- { + if ns[i].typ > ntStatic && ns[i].tail == '/' { + ns.Swap(i, len(ns)-1) + return + } + } +} + +func (ns nodes) findEdge(label byte) *node { + num := len(ns) + idx := 0 + i, j := 0, num-1 + for i <= j { + idx = i + (j-i)/2 + if label > ns[idx].label { + i = idx + 1 + } else if label < ns[idx].label { + j = idx - 1 + } else { + i = num // breaks cond + } + } + if ns[idx].label != label { + return nil + } + return ns[idx] +} + +// Route describes the details of a routing handler. +// Handlers map key is an HTTP method +type Route struct { + Pattern string + Handlers map[string]http.Handler + SubRoutes Routes +} + +// WalkFunc is the type of the function called for each method and route visited by Walk. +type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error + +// Walk walks any router tree that implements Routes interface. +func Walk(r Routes, walkFn WalkFunc) error { + return walk(r, walkFn, "") +} + +func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error { + for _, route := range r.Routes() { + mws := make([]func(http.Handler) http.Handler, len(parentMw)) + copy(mws, parentMw) + mws = append(mws, r.Middlewares()...) 
+ + if route.SubRoutes != nil { + if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil { + return err + } + continue + } + + for method, handler := range route.Handlers { + if method == "*" { + // Ignore a "catchAll" method, since we pass down all the specific methods for each route. + continue + } + + fullRoute := parentRoute + route.Pattern + fullRoute = strings.Replace(fullRoute, "/*/", "/", -1) + + if chain, ok := handler.(*ChainHandler); ok { + if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil { + return err + } + } else { + if err := walkFn(method, fullRoute, handler, mws...); err != nil { + return err + } + } + } + } + + return nil +} diff --git a/vendor/github.com/go-chi/cors/LICENSE b/vendor/github.com/go-chi/cors/LICENSE index aee6182f9a..4625ccdb6b 100644 --- a/vendor/github.com/go-chi/cors/LICENSE +++ b/vendor/github.com/go-chi/cors/LICENSE @@ -1,21 +1,21 @@ -Copyright (c) 2014 Olivier Poitrey -Copyright (c) 2016-Present https://github.com/go-chi authors - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +Copyright (c) 2014 Olivier Poitrey +Copyright (c) 2016-Present https://github.com/go-chi authors + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
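Note on the tree.go hunk above: the removed and re-added lines are byte-identical apart from line endings, so chi's routing trie is functionally unchanged by this diff. For context, the four node types the trie distinguishes (ntStatic, ntParam, ntRegexp, ntCatchAll) map directly onto route pattern syntax. Below is a minimal sketch of that mapping through chi's public API; the routes and handlers are invented for illustration and are not part of this change:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi"
)

func main() {
	r := chi.NewRouter()

	// "/home" is all static: a single ntStatic node.
	r.Get("/home", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "home")
	})

	// "{user}" becomes an ntParam node; the matched segment is
	// recorded in the routing context and read back via URLParam.
	r.Get("/users/{user}", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(w, "user=%s", chi.URLParam(req, "user"))
	})

	// "{id:[0-9]+}" becomes an ntRegexp node; patNextSegment anchors
	// the expression to ^[0-9]+$ before compiling, so it must match
	// the entire path segment.
	r.Get("/orders/{id:[0-9]+}", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(w, "order=%s", chi.URLParam(req, "id"))
	})

	// A trailing "*" becomes an ntCatchAll node and must be the last
	// segment of the pattern; the remainder is stored under key "*".
	r.Get("/static/*", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(w, "rest=%s", chi.URLParam(req, "*"))
	})

	http.ListenAndServe(":8080", r)
}
```

As the tree code above shows, param and regexp matches never cross a `/` boundary (findRoute rejects a match containing `/`), while the catch-all consumes the whole remaining path.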
diff --git a/vendor/github.com/go-chi/cors/README.md b/vendor/github.com/go-chi/cors/README.md index b41686b6af..dd388d178f 100644 --- a/vendor/github.com/go-chi/cors/README.md +++ b/vendor/github.com/go-chi/cors/README.md @@ -1,39 +1,39 @@ -# CORS net/http middleware - -[go-chi/cors](https://github.com/go-chi/cors) is a fork of [github.com/rs/cors](https://github.com/rs/cors) that -provides a `net/http` compatible middleware for performing preflight CORS checks on the server side. These headers -are required for using the browser native [Fetch API](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API). - -This middleware is designed to be used as a top-level middleware on the [chi](https://github.com/go-chi/chi) router. -Applying with within a `r.Group()` or using `With()` will not work without routes matching `OPTIONS` added. - -## Usage - -```go -func main() { - r := chi.NewRouter() - - // Basic CORS - // for more ideas, see: https://developer.github.com/v3/#cross-origin-resource-sharing - r.Use(cors.Handler(cors.Options{ - // AllowedOrigins: []string{"https://foo.com"}, // Use this to allow specific origin hosts - AllowedOrigins: []string{"https://*", "http://*"}, - // AllowOriginFunc: func(r *http.Request, origin string) bool { return true }, - AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, - AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token"}, - ExposedHeaders: []string{"Link"}, - AllowCredentials: false, - MaxAge: 300, // Maximum value not ignored by any of major browsers - })) - - r.Get("/", func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("welcome")) - }) - - http.ListenAndServe(":3000", r) -} -``` - -## Credits - -All credit for the original work of this middleware goes out to [github.com/rs](github.com/rs). +# CORS net/http middleware + +[go-chi/cors](https://github.com/go-chi/cors) is a fork of [github.com/rs/cors](https://github.com/rs/cors) that +provides a `net/http` compatible middleware for performing preflight CORS checks on the server side. These headers +are required for using the browser native [Fetch API](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API). + +This middleware is designed to be used as a top-level middleware on the [chi](https://github.com/go-chi/chi) router. +Applying with within a `r.Group()` or using `With()` will not work without routes matching `OPTIONS` added. + +## Usage + +```go +func main() { + r := chi.NewRouter() + + // Basic CORS + // for more ideas, see: https://developer.github.com/v3/#cross-origin-resource-sharing + r.Use(cors.Handler(cors.Options{ + // AllowedOrigins: []string{"https://foo.com"}, // Use this to allow specific origin hosts + AllowedOrigins: []string{"https://*", "http://*"}, + // AllowOriginFunc: func(r *http.Request, origin string) bool { return true }, + AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, + AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "X-CSRF-Token"}, + ExposedHeaders: []string{"Link"}, + AllowCredentials: false, + MaxAge: 300, // Maximum value not ignored by any of major browsers + })) + + r.Get("/", func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("welcome")) + }) + + http.ListenAndServe(":3000", r) +} +``` + +## Credits + +All credit for the original work of this middleware goes out to [github.com/rs](github.com/rs). 
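Similarly, the cors.go hunk below only normalizes line endings. Since the README above already covers basic wiring, here is a hedged httptest sketch of the preflight path (handlePreflight) instead; the origin, methods, and header values are arbitrary placeholders chosen for illustration:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/go-chi/cors"
)

func main() {
	// Wrap a trivial endpoint with the middleware.
	h := cors.Handler(cors.Options{
		AllowedOrigins: []string{"https://example.com"},
		AllowedMethods: []string{"GET", "POST"},
		AllowedHeaders: []string{"Content-Type"},
		MaxAge:         300,
	})(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "ok")
	}))

	// An OPTIONS request carrying Access-Control-Request-Method is
	// treated as a preflight and routed to handlePreflight.
	req := httptest.NewRequest(http.MethodOptions, "/", nil)
	req.Header.Set("Origin", "https://example.com")
	req.Header.Set("Access-Control-Request-Method", "POST")

	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)

	// Expected: 200, origin echoed back, requested method allowed,
	// and Max-Age taken from the options.
	fmt.Println(rec.Code)                                         // 200
	fmt.Println(rec.Header().Get("Access-Control-Allow-Origin"))  // https://example.com
	fmt.Println(rec.Header().Get("Access-Control-Allow-Methods")) // POST
	fmt.Println(rec.Header().Get("Access-Control-Max-Age"))       // 300
}
```

Because OptionsPassthrough is left false, the middleware answers the preflight itself with 200 and never invokes the wrapped handler; and as the spec-quoting comment in handlePreflight notes, Access-Control-Allow-Methods simply echoes the requested method rather than listing every allowed one.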
diff --git a/vendor/github.com/go-chi/cors/cors.go b/vendor/github.com/go-chi/cors/cors.go index 8df81636e3..f6023ae9d9 100644 --- a/vendor/github.com/go-chi/cors/cors.go +++ b/vendor/github.com/go-chi/cors/cors.go @@ -1,400 +1,400 @@ -// cors package is net/http handler to handle CORS related requests -// as defined by http://www.w3.org/TR/cors/ -// -// You can configure it by passing an option struct to cors.New: -// -// c := cors.New(cors.Options{ -// AllowedOrigins: []string{"foo.com"}, -// AllowedMethods: []string{"GET", "POST", "DELETE"}, -// AllowCredentials: true, -// }) -// -// Then insert the handler in the chain: -// -// handler = c.Handler(handler) -// -// See Options documentation for more options. -// -// The resulting handler is a standard net/http handler. -package cors - -import ( - "log" - "net/http" - "os" - "strconv" - "strings" -) - -// Options is a configuration container to setup the CORS middleware. -type Options struct { - // AllowedOrigins is a list of origins a cross-domain request can be executed from. - // If the special "*" value is present in the list, all origins will be allowed. - // An origin may contain a wildcard (*) to replace 0 or more characters - // (i.e.: http://*.domain.com). Usage of wildcards implies a small performance penalty. - // Only one wildcard can be used per origin. - // Default value is ["*"] - AllowedOrigins []string - - // AllowOriginFunc is a custom function to validate the origin. It takes the origin - // as argument and returns true if allowed or false otherwise. If this option is - // set, the content of AllowedOrigins is ignored. - AllowOriginFunc func(r *http.Request, origin string) bool - - // AllowedMethods is a list of methods the client is allowed to use with - // cross-domain requests. Default value is simple methods (HEAD, GET and POST). - AllowedMethods []string - - // AllowedHeaders is list of non simple headers the client is allowed to use with - // cross-domain requests. - // If the special "*" value is present in the list, all headers will be allowed. - // Default value is [] but "Origin" is always appended to the list. - AllowedHeaders []string - - // ExposedHeaders indicates which headers are safe to expose to the API of a CORS - // API specification - ExposedHeaders []string - - // AllowCredentials indicates whether the request can include user credentials like - // cookies, HTTP authentication or client side SSL certificates. - AllowCredentials bool - - // MaxAge indicates how long (in seconds) the results of a preflight request - // can be cached - MaxAge int - - // OptionsPassthrough instructs preflight to let other potential next handlers to - // process the OPTIONS method. Turn this on if your application handles OPTIONS. 
- OptionsPassthrough bool - - // Debugging flag adds additional output to debug server side CORS issues - Debug bool -} - -// Logger generic interface for logger -type Logger interface { - Printf(string, ...interface{}) -} - -// Cors http handler -type Cors struct { - // Debug logger - Log Logger - - // Normalized list of plain allowed origins - allowedOrigins []string - - // List of allowed origins containing wildcards - allowedWOrigins []wildcard - - // Optional origin validator function - allowOriginFunc func(r *http.Request, origin string) bool - - // Normalized list of allowed headers - allowedHeaders []string - - // Normalized list of allowed methods - allowedMethods []string - - // Normalized list of exposed headers - exposedHeaders []string - maxAge int - - // Set to true when allowed origins contains a "*" - allowedOriginsAll bool - - // Set to true when allowed headers contains a "*" - allowedHeadersAll bool - - allowCredentials bool - optionPassthrough bool -} - -// New creates a new Cors handler with the provided options. -func New(options Options) *Cors { - c := &Cors{ - exposedHeaders: convert(options.ExposedHeaders, http.CanonicalHeaderKey), - allowOriginFunc: options.AllowOriginFunc, - allowCredentials: options.AllowCredentials, - maxAge: options.MaxAge, - optionPassthrough: options.OptionsPassthrough, - } - if options.Debug && c.Log == nil { - c.Log = log.New(os.Stdout, "[cors] ", log.LstdFlags) - } - - // Normalize options - // Note: for origins and methods matching, the spec requires a case-sensitive matching. - // As it may error prone, we chose to ignore the spec here. - - // Allowed Origins - if len(options.AllowedOrigins) == 0 { - if options.AllowOriginFunc == nil { - // Default is all origins - c.allowedOriginsAll = true - } - } else { - c.allowedOrigins = []string{} - c.allowedWOrigins = []wildcard{} - for _, origin := range options.AllowedOrigins { - // Normalize - origin = strings.ToLower(origin) - if origin == "*" { - // If "*" is present in the list, turn the whole list into a match all - c.allowedOriginsAll = true - c.allowedOrigins = nil - c.allowedWOrigins = nil - break - } else if i := strings.IndexByte(origin, '*'); i >= 0 { - // Split the origin in two: start and end string without the * - w := wildcard{origin[0:i], origin[i+1:]} - c.allowedWOrigins = append(c.allowedWOrigins, w) - } else { - c.allowedOrigins = append(c.allowedOrigins, origin) - } - } - } - - // Allowed Headers - if len(options.AllowedHeaders) == 0 { - // Use sensible defaults - c.allowedHeaders = []string{"Origin", "Accept", "Content-Type"} - } else { - // Origin is always appended as some browsers will always request for this header at preflight - c.allowedHeaders = convert(append(options.AllowedHeaders, "Origin"), http.CanonicalHeaderKey) - for _, h := range options.AllowedHeaders { - if h == "*" { - c.allowedHeadersAll = true - c.allowedHeaders = nil - break - } - } - } - - // Allowed Methods - if len(options.AllowedMethods) == 0 { - // Default is spec's "simple" methods - c.allowedMethods = []string{http.MethodGet, http.MethodPost, http.MethodHead} - } else { - c.allowedMethods = convert(options.AllowedMethods, strings.ToUpper) - } - - return c -} - -// Handler creates a new Cors handler with passed options. -func Handler(options Options) func(next http.Handler) http.Handler { - c := New(options) - return c.Handler -} - -// AllowAll create a new Cors handler with permissive configuration allowing all -// origins with all standard methods with any header and credentials. 
-func AllowAll() *Cors { - return New(Options{ - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{ - http.MethodHead, - http.MethodGet, - http.MethodPost, - http.MethodPut, - http.MethodPatch, - http.MethodDelete, - }, - AllowedHeaders: []string{"*"}, - AllowCredentials: false, - }) -} - -// Handler apply the CORS specification on the request, and add relevant CORS headers -// as necessary. -func (c *Cors) Handler(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodOptions && r.Header.Get("Access-Control-Request-Method") != "" { - c.logf("Handler: Preflight request") - c.handlePreflight(w, r) - // Preflight requests are standalone and should stop the chain as some other - // middleware may not handle OPTIONS requests correctly. One typical example - // is authentication middleware ; OPTIONS requests won't carry authentication - // headers (see #1) - if c.optionPassthrough { - next.ServeHTTP(w, r) - } else { - w.WriteHeader(http.StatusOK) - } - } else { - c.logf("Handler: Actual request") - c.handleActualRequest(w, r) - next.ServeHTTP(w, r) - } - }) -} - -// handlePreflight handles pre-flight CORS requests -func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { - headers := w.Header() - origin := r.Header.Get("Origin") - - if r.Method != http.MethodOptions { - c.logf("Preflight aborted: %s!=OPTIONS", r.Method) - return - } - // Always set Vary headers - // see https://github.com/rs/cors/issues/10, - // https://github.com/rs/cors/commit/dbdca4d95feaa7511a46e6f1efb3b3aa505bc43f#commitcomment-12352001 - headers.Add("Vary", "Origin") - headers.Add("Vary", "Access-Control-Request-Method") - headers.Add("Vary", "Access-Control-Request-Headers") - - if origin == "" { - c.logf("Preflight aborted: empty origin") - return - } - if !c.isOriginAllowed(r, origin) { - c.logf("Preflight aborted: origin '%s' not allowed", origin) - return - } - - reqMethod := r.Header.Get("Access-Control-Request-Method") - if !c.isMethodAllowed(reqMethod) { - c.logf("Preflight aborted: method '%s' not allowed", reqMethod) - return - } - reqHeaders := parseHeaderList(r.Header.Get("Access-Control-Request-Headers")) - if !c.areHeadersAllowed(reqHeaders) { - c.logf("Preflight aborted: headers '%v' not allowed", reqHeaders) - return - } - if c.allowedOriginsAll { - headers.Set("Access-Control-Allow-Origin", "*") - } else { - headers.Set("Access-Control-Allow-Origin", origin) - } - // Spec says: Since the list of methods can be unbounded, simply returning the method indicated - // by Access-Control-Request-Method (if supported) can be enough - headers.Set("Access-Control-Allow-Methods", strings.ToUpper(reqMethod)) - if len(reqHeaders) > 0 { - - // Spec says: Since the list of headers can be unbounded, simply returning supported headers - // from Access-Control-Request-Headers can be enough - headers.Set("Access-Control-Allow-Headers", strings.Join(reqHeaders, ", ")) - } - if c.allowCredentials { - headers.Set("Access-Control-Allow-Credentials", "true") - } - if c.maxAge > 0 { - headers.Set("Access-Control-Max-Age", strconv.Itoa(c.maxAge)) - } - c.logf("Preflight response headers: %v", headers) -} - -// handleActualRequest handles simple cross-origin requests, actual request or redirects -func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) { - headers := w.Header() - origin := r.Header.Get("Origin") - - // Always set Vary, see https://github.com/rs/cors/issues/10 - headers.Add("Vary", "Origin") - if 
origin == "" { - c.logf("Actual request no headers added: missing origin") - return - } - if !c.isOriginAllowed(r, origin) { - c.logf("Actual request no headers added: origin '%s' not allowed", origin) - return - } - - // Note that spec does define a way to specifically disallow a simple method like GET or - // POST. Access-Control-Allow-Methods is only used for pre-flight requests and the - // spec doesn't instruct to check the allowed methods for simple cross-origin requests. - // We think it's a nice feature to be able to have control on those methods though. - if !c.isMethodAllowed(r.Method) { - c.logf("Actual request no headers added: method '%s' not allowed", r.Method) - - return - } - if c.allowedOriginsAll { - headers.Set("Access-Control-Allow-Origin", "*") - } else { - headers.Set("Access-Control-Allow-Origin", origin) - } - if len(c.exposedHeaders) > 0 { - headers.Set("Access-Control-Expose-Headers", strings.Join(c.exposedHeaders, ", ")) - } - if c.allowCredentials { - headers.Set("Access-Control-Allow-Credentials", "true") - } - c.logf("Actual response added headers: %v", headers) -} - -// convenience method. checks if a logger is set. -func (c *Cors) logf(format string, a ...interface{}) { - if c.Log != nil { - c.Log.Printf(format, a...) - } -} - -// isOriginAllowed checks if a given origin is allowed to perform cross-domain requests -// on the endpoint -func (c *Cors) isOriginAllowed(r *http.Request, origin string) bool { - if c.allowOriginFunc != nil { - return c.allowOriginFunc(r, origin) - } - if c.allowedOriginsAll { - return true - } - origin = strings.ToLower(origin) - for _, o := range c.allowedOrigins { - if o == origin { - return true - } - } - for _, w := range c.allowedWOrigins { - if w.match(origin) { - return true - } - } - return false -} - -// isMethodAllowed checks if a given method can be used as part of a cross-domain request -// on the endpoint -func (c *Cors) isMethodAllowed(method string) bool { - if len(c.allowedMethods) == 0 { - // If no method allowed, always return false, even for preflight request - return false - } - method = strings.ToUpper(method) - if method == http.MethodOptions { - // Always allow preflight requests - return true - } - for _, m := range c.allowedMethods { - if m == method { - return true - } - } - return false -} - -// areHeadersAllowed checks if a given list of headers are allowed to used within -// a cross-domain request. -func (c *Cors) areHeadersAllowed(requestedHeaders []string) bool { - if c.allowedHeadersAll || len(requestedHeaders) == 0 { - return true - } - for _, header := range requestedHeaders { - header = http.CanonicalHeaderKey(header) - found := false - for _, h := range c.allowedHeaders { - if h == header { - found = true - break - } - } - if !found { - return false - } - } - return true -} +// cors package is net/http handler to handle CORS related requests +// as defined by http://www.w3.org/TR/cors/ +// +// You can configure it by passing an option struct to cors.New: +// +// c := cors.New(cors.Options{ +// AllowedOrigins: []string{"foo.com"}, +// AllowedMethods: []string{"GET", "POST", "DELETE"}, +// AllowCredentials: true, +// }) +// +// Then insert the handler in the chain: +// +// handler = c.Handler(handler) +// +// See Options documentation for more options. +// +// The resulting handler is a standard net/http handler. +package cors + +import ( + "log" + "net/http" + "os" + "strconv" + "strings" +) + +// Options is a configuration container to setup the CORS middleware. 
+type Options struct { + // AllowedOrigins is a list of origins a cross-domain request can be executed from. + // If the special "*" value is present in the list, all origins will be allowed. + // An origin may contain a wildcard (*) to replace 0 or more characters + // (i.e.: http://*.domain.com). Usage of wildcards implies a small performance penalty. + // Only one wildcard can be used per origin. + // Default value is ["*"] + AllowedOrigins []string + + // AllowOriginFunc is a custom function to validate the origin. It takes the origin + // as argument and returns true if allowed or false otherwise. If this option is + // set, the content of AllowedOrigins is ignored. + AllowOriginFunc func(r *http.Request, origin string) bool + + // AllowedMethods is a list of methods the client is allowed to use with + // cross-domain requests. Default value is simple methods (HEAD, GET and POST). + AllowedMethods []string + + // AllowedHeaders is list of non simple headers the client is allowed to use with + // cross-domain requests. + // If the special "*" value is present in the list, all headers will be allowed. + // Default value is [] but "Origin" is always appended to the list. + AllowedHeaders []string + + // ExposedHeaders indicates which headers are safe to expose to the API of a CORS + // API specification + ExposedHeaders []string + + // AllowCredentials indicates whether the request can include user credentials like + // cookies, HTTP authentication or client side SSL certificates. + AllowCredentials bool + + // MaxAge indicates how long (in seconds) the results of a preflight request + // can be cached + MaxAge int + + // OptionsPassthrough instructs preflight to let other potential next handlers to + // process the OPTIONS method. Turn this on if your application handles OPTIONS. + OptionsPassthrough bool + + // Debugging flag adds additional output to debug server side CORS issues + Debug bool +} + +// Logger generic interface for logger +type Logger interface { + Printf(string, ...interface{}) +} + +// Cors http handler +type Cors struct { + // Debug logger + Log Logger + + // Normalized list of plain allowed origins + allowedOrigins []string + + // List of allowed origins containing wildcards + allowedWOrigins []wildcard + + // Optional origin validator function + allowOriginFunc func(r *http.Request, origin string) bool + + // Normalized list of allowed headers + allowedHeaders []string + + // Normalized list of allowed methods + allowedMethods []string + + // Normalized list of exposed headers + exposedHeaders []string + maxAge int + + // Set to true when allowed origins contains a "*" + allowedOriginsAll bool + + // Set to true when allowed headers contains a "*" + allowedHeadersAll bool + + allowCredentials bool + optionPassthrough bool +} + +// New creates a new Cors handler with the provided options. +func New(options Options) *Cors { + c := &Cors{ + exposedHeaders: convert(options.ExposedHeaders, http.CanonicalHeaderKey), + allowOriginFunc: options.AllowOriginFunc, + allowCredentials: options.AllowCredentials, + maxAge: options.MaxAge, + optionPassthrough: options.OptionsPassthrough, + } + if options.Debug && c.Log == nil { + c.Log = log.New(os.Stdout, "[cors] ", log.LstdFlags) + } + + // Normalize options + // Note: for origins and methods matching, the spec requires a case-sensitive matching. + // As it may error prone, we chose to ignore the spec here. 
+ + // Allowed Origins + if len(options.AllowedOrigins) == 0 { + if options.AllowOriginFunc == nil { + // Default is all origins + c.allowedOriginsAll = true + } + } else { + c.allowedOrigins = []string{} + c.allowedWOrigins = []wildcard{} + for _, origin := range options.AllowedOrigins { + // Normalize + origin = strings.ToLower(origin) + if origin == "*" { + // If "*" is present in the list, turn the whole list into a match all + c.allowedOriginsAll = true + c.allowedOrigins = nil + c.allowedWOrigins = nil + break + } else if i := strings.IndexByte(origin, '*'); i >= 0 { + // Split the origin in two: start and end string without the * + w := wildcard{origin[0:i], origin[i+1:]} + c.allowedWOrigins = append(c.allowedWOrigins, w) + } else { + c.allowedOrigins = append(c.allowedOrigins, origin) + } + } + } + + // Allowed Headers + if len(options.AllowedHeaders) == 0 { + // Use sensible defaults + c.allowedHeaders = []string{"Origin", "Accept", "Content-Type"} + } else { + // Origin is always appended as some browsers will always request for this header at preflight + c.allowedHeaders = convert(append(options.AllowedHeaders, "Origin"), http.CanonicalHeaderKey) + for _, h := range options.AllowedHeaders { + if h == "*" { + c.allowedHeadersAll = true + c.allowedHeaders = nil + break + } + } + } + + // Allowed Methods + if len(options.AllowedMethods) == 0 { + // Default is spec's "simple" methods + c.allowedMethods = []string{http.MethodGet, http.MethodPost, http.MethodHead} + } else { + c.allowedMethods = convert(options.AllowedMethods, strings.ToUpper) + } + + return c +} + +// Handler creates a new Cors handler with passed options. +func Handler(options Options) func(next http.Handler) http.Handler { + c := New(options) + return c.Handler +} + +// AllowAll create a new Cors handler with permissive configuration allowing all +// origins with all standard methods with any header and credentials. +func AllowAll() *Cors { + return New(Options{ + AllowedOrigins: []string{"*"}, + AllowedMethods: []string{ + http.MethodHead, + http.MethodGet, + http.MethodPost, + http.MethodPut, + http.MethodPatch, + http.MethodDelete, + }, + AllowedHeaders: []string{"*"}, + AllowCredentials: false, + }) +} + +// Handler apply the CORS specification on the request, and add relevant CORS headers +// as necessary. +func (c *Cors) Handler(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodOptions && r.Header.Get("Access-Control-Request-Method") != "" { + c.logf("Handler: Preflight request") + c.handlePreflight(w, r) + // Preflight requests are standalone and should stop the chain as some other + // middleware may not handle OPTIONS requests correctly. 
One typical example + // is authentication middleware ; OPTIONS requests won't carry authentication + // headers (see #1) + if c.optionPassthrough { + next.ServeHTTP(w, r) + } else { + w.WriteHeader(http.StatusOK) + } + } else { + c.logf("Handler: Actual request") + c.handleActualRequest(w, r) + next.ServeHTTP(w, r) + } + }) +} + +// handlePreflight handles pre-flight CORS requests +func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { + headers := w.Header() + origin := r.Header.Get("Origin") + + if r.Method != http.MethodOptions { + c.logf("Preflight aborted: %s!=OPTIONS", r.Method) + return + } + // Always set Vary headers + // see https://github.com/rs/cors/issues/10, + // https://github.com/rs/cors/commit/dbdca4d95feaa7511a46e6f1efb3b3aa505bc43f#commitcomment-12352001 + headers.Add("Vary", "Origin") + headers.Add("Vary", "Access-Control-Request-Method") + headers.Add("Vary", "Access-Control-Request-Headers") + + if origin == "" { + c.logf("Preflight aborted: empty origin") + return + } + if !c.isOriginAllowed(r, origin) { + c.logf("Preflight aborted: origin '%s' not allowed", origin) + return + } + + reqMethod := r.Header.Get("Access-Control-Request-Method") + if !c.isMethodAllowed(reqMethod) { + c.logf("Preflight aborted: method '%s' not allowed", reqMethod) + return + } + reqHeaders := parseHeaderList(r.Header.Get("Access-Control-Request-Headers")) + if !c.areHeadersAllowed(reqHeaders) { + c.logf("Preflight aborted: headers '%v' not allowed", reqHeaders) + return + } + if c.allowedOriginsAll { + headers.Set("Access-Control-Allow-Origin", "*") + } else { + headers.Set("Access-Control-Allow-Origin", origin) + } + // Spec says: Since the list of methods can be unbounded, simply returning the method indicated + // by Access-Control-Request-Method (if supported) can be enough + headers.Set("Access-Control-Allow-Methods", strings.ToUpper(reqMethod)) + if len(reqHeaders) > 0 { + + // Spec says: Since the list of headers can be unbounded, simply returning supported headers + // from Access-Control-Request-Headers can be enough + headers.Set("Access-Control-Allow-Headers", strings.Join(reqHeaders, ", ")) + } + if c.allowCredentials { + headers.Set("Access-Control-Allow-Credentials", "true") + } + if c.maxAge > 0 { + headers.Set("Access-Control-Max-Age", strconv.Itoa(c.maxAge)) + } + c.logf("Preflight response headers: %v", headers) +} + +// handleActualRequest handles simple cross-origin requests, actual request or redirects +func (c *Cors) handleActualRequest(w http.ResponseWriter, r *http.Request) { + headers := w.Header() + origin := r.Header.Get("Origin") + + // Always set Vary, see https://github.com/rs/cors/issues/10 + headers.Add("Vary", "Origin") + if origin == "" { + c.logf("Actual request no headers added: missing origin") + return + } + if !c.isOriginAllowed(r, origin) { + c.logf("Actual request no headers added: origin '%s' not allowed", origin) + return + } + + // Note that spec does define a way to specifically disallow a simple method like GET or + // POST. Access-Control-Allow-Methods is only used for pre-flight requests and the + // spec doesn't instruct to check the allowed methods for simple cross-origin requests. + // We think it's a nice feature to be able to have control on those methods though. 
+	if !c.isMethodAllowed(r.Method) {
+		c.logf("Actual request no headers added: method '%s' not allowed", r.Method)
+
+		return
+	}
+	if c.allowedOriginsAll {
+		headers.Set("Access-Control-Allow-Origin", "*")
+	} else {
+		headers.Set("Access-Control-Allow-Origin", origin)
+	}
+	if len(c.exposedHeaders) > 0 {
+		headers.Set("Access-Control-Expose-Headers", strings.Join(c.exposedHeaders, ", "))
+	}
+	if c.allowCredentials {
+		headers.Set("Access-Control-Allow-Credentials", "true")
+	}
+	c.logf("Actual response added headers: %v", headers)
+}
+
+// logf is a convenience method; it logs only if a logger is set.
+func (c *Cors) logf(format string, a ...interface{}) {
+	if c.Log != nil {
+		c.Log.Printf(format, a...)
+	}
+}
+
+// isOriginAllowed checks if a given origin is allowed to perform cross-domain requests
+// on the endpoint.
+func (c *Cors) isOriginAllowed(r *http.Request, origin string) bool {
+	if c.allowOriginFunc != nil {
+		return c.allowOriginFunc(r, origin)
+	}
+	if c.allowedOriginsAll {
+		return true
+	}
+	origin = strings.ToLower(origin)
+	for _, o := range c.allowedOrigins {
+		if o == origin {
+			return true
+		}
+	}
+	for _, w := range c.allowedWOrigins {
+		if w.match(origin) {
+			return true
+		}
+	}
+	return false
+}
+
+// isMethodAllowed checks if a given method can be used as part of a cross-domain request
+// on the endpoint.
+func (c *Cors) isMethodAllowed(method string) bool {
+	if len(c.allowedMethods) == 0 {
+		// If no methods are allowed, always return false, even for preflight requests
+		return false
+	}
+	method = strings.ToUpper(method)
+	if method == http.MethodOptions {
+		// Always allow preflight requests
+		return true
+	}
+	for _, m := range c.allowedMethods {
+		if m == method {
+			return true
+		}
+	}
+	return false
+}
+
+// areHeadersAllowed checks if a given list of headers is allowed to be used within
+// a cross-domain request.
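As an aside: the wildcard matching that `isOriginAllowed` delegates to lives in the unexported `wildcard` type (see `utils.go` below). A standalone re-implementation of the same prefix/suffix rule, written here purely for illustration and assuming a pattern contains at most one `*`:

```go
package main

import (
	"fmt"
	"strings"
)

// matchWildcard mirrors the unexported wildcard.match in this package:
// a pattern like "https://*.example.com" is split at the '*' into a
// prefix and a suffix, and a candidate matches when it is long enough
// to carry both. The package lowercases origins before matching.
func matchWildcard(pattern, origin string) bool {
	i := strings.IndexByte(pattern, '*')
	if i < 0 {
		return pattern == origin
	}
	prefix, suffix := pattern[:i], pattern[i+1:]
	return len(origin) >= len(prefix)+len(suffix) &&
		strings.HasPrefix(origin, prefix) &&
		strings.HasSuffix(origin, suffix)
}

func main() {
	fmt.Println(matchWildcard("https://*.example.com", "https://api.example.com")) // true
	fmt.Println(matchWildcard("https://*.example.com", "https://example.com"))     // false: too short to hold prefix + suffix
	fmt.Println(matchWildcard("https://example.com", "https://example.com"))       // true: exact match, no wildcard
}
```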
+func (c *Cors) areHeadersAllowed(requestedHeaders []string) bool { + if c.allowedHeadersAll || len(requestedHeaders) == 0 { + return true + } + for _, header := range requestedHeaders { + header = http.CanonicalHeaderKey(header) + found := false + for _, h := range c.allowedHeaders { + if h == header { + found = true + break + } + } + if !found { + return false + } + } + return true +} diff --git a/vendor/github.com/go-chi/cors/utils.go b/vendor/github.com/go-chi/cors/utils.go index 3fe5a5aeeb..47efcbb4b6 100644 --- a/vendor/github.com/go-chi/cors/utils.go +++ b/vendor/github.com/go-chi/cors/utils.go @@ -1,70 +1,70 @@ -package cors - -import "strings" - -const toLower = 'a' - 'A' - -type converter func(string) string - -type wildcard struct { - prefix string - suffix string -} - -func (w wildcard) match(s string) bool { - return len(s) >= len(w.prefix+w.suffix) && strings.HasPrefix(s, w.prefix) && strings.HasSuffix(s, w.suffix) -} - -// convert converts a list of string using the passed converter function -func convert(s []string, c converter) []string { - out := []string{} - for _, i := range s { - out = append(out, c(i)) - } - return out -} - -// parseHeaderList tokenize + normalize a string containing a list of headers -func parseHeaderList(headerList string) []string { - l := len(headerList) - h := make([]byte, 0, l) - upper := true - // Estimate the number headers in order to allocate the right splice size - t := 0 - for i := 0; i < l; i++ { - if headerList[i] == ',' { - t++ - } - } - headers := make([]string, 0, t) - for i := 0; i < l; i++ { - b := headerList[i] - if b >= 'a' && b <= 'z' { - if upper { - h = append(h, b-toLower) - } else { - h = append(h, b) - } - } else if b >= 'A' && b <= 'Z' { - if !upper { - h = append(h, b+toLower) - } else { - h = append(h, b) - } - } else if b == '-' || b == '_' || b == '.' || (b >= '0' && b <= '9') { - h = append(h, b) - } - - if b == ' ' || b == ',' || i == l-1 { - if len(h) > 0 { - // Flush the found header - headers = append(headers, string(h)) - h = h[:0] - upper = true - } - } else { - upper = b == '-' - } - } - return headers -} +package cors + +import "strings" + +const toLower = 'a' - 'A' + +type converter func(string) string + +type wildcard struct { + prefix string + suffix string +} + +func (w wildcard) match(s string) bool { + return len(s) >= len(w.prefix+w.suffix) && strings.HasPrefix(s, w.prefix) && strings.HasSuffix(s, w.suffix) +} + +// convert converts a list of string using the passed converter function +func convert(s []string, c converter) []string { + out := []string{} + for _, i := range s { + out = append(out, c(i)) + } + return out +} + +// parseHeaderList tokenize + normalize a string containing a list of headers +func parseHeaderList(headerList string) []string { + l := len(headerList) + h := make([]byte, 0, l) + upper := true + // Estimate the number headers in order to allocate the right splice size + t := 0 + for i := 0; i < l; i++ { + if headerList[i] == ',' { + t++ + } + } + headers := make([]string, 0, t) + for i := 0; i < l; i++ { + b := headerList[i] + if b >= 'a' && b <= 'z' { + if upper { + h = append(h, b-toLower) + } else { + h = append(h, b) + } + } else if b >= 'A' && b <= 'Z' { + if !upper { + h = append(h, b+toLower) + } else { + h = append(h, b) + } + } else if b == '-' || b == '_' || b == '.' 
|| (b >= '0' && b <= '9') { + h = append(h, b) + } + + if b == ' ' || b == ',' || i == l-1 { + if len(h) > 0 { + // Flush the found header + headers = append(headers, string(h)) + h = h[:0] + upper = true + } + } else { + upper = b == '-' + } + } + return headers +} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml index d8156a60ba..487eaf24be 100644 --- a/vendor/github.com/google/uuid/.travis.yml +++ b/vendor/github.com/google/uuid/.travis.yml @@ -1,9 +1,9 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09f13..acc36eacdb 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -1,10 +1,10 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS index b4bb97f6bc..56d4196099 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -1,9 +1,9 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE index 5dc68268d9..b5c4e63311 100644 --- a/vendor/github.com/google/uuid/LICENSE +++ b/vendor/github.com/google/uuid/LICENSE @@ -1,27 +1,27 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index f765a46f91..8162dd937e 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,19 +1,19 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). - -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://pkg.go.dev/github.com/google/uuid +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). 
It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go index fa820b9d30..cf2802f23a 100644 --- a/vendor/github.com/google/uuid/dce.go +++ b/vendor/github.com/google/uuid/dce.go @@ -1,80 +1,80 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. -func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. 
+// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go index 5b8a4b9af8..cdfce7fab6 100644 --- a/vendor/github.com/google/uuid/doc.go +++ b/vendor/github.com/google/uuid/doc.go @@ -1,12 +1,12 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. -package uuid +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4bec2..72ce847b11 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -1,53 +1,53 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
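Returning to `dce.go` above for a moment: a short, self-contained example of the Version 2 constructors. All identifiers are from the vendored package itself; note that `NewDCESecurity` can return an error because it builds on the time-based `NewUUID`, and `os.Getuid` is only meaningful on POSIX systems.

```go
package main

import (
	"fmt"
	"os"

	"github.com/google/uuid"
)

func main() {
	// Equivalent to uuid.NewDCEPerson(): a Version 2 UUID whose first
	// 4 bytes carry the caller's numeric ID and whose byte 9 encodes
	// the domain.
	u, err := uuid.NewDCESecurity(uuid.Person, uint32(os.Getuid()))
	if err != nil {
		panic(err)
	}
	fmt.Println(u)          // e.g. 000003e8-xxxx-2xxx-...
	fmt.Println(u.Domain()) // Person
	fmt.Println(u.ID())     // the uid passed in
}
```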
- -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) //nolint:errcheck - h.Write(data) //nolint:errcheck - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. 
It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go index 14bd34072b..5476307f5d 100644 --- a/vendor/github.com/google/uuid/marshal.go +++ b/vendor/github.com/google/uuid/marshal.go @@ -1,38 +1,38 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - return err - } - *uuid = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go index d651a2b061..0d7d3a55d5 100644 --- a/vendor/github.com/google/uuid/node.go +++ b/vendor/github.com/google/uuid/node.go @@ -1,90 +1,90 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". 
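The hash-based constructors in `hash.go` above are deterministic, which is their whole point: hashing a namespace UUID together with a name always yields the same UUID. A small sketch using only identifiers from this package:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Name-based UUIDs are pure functions of (namespace, name), so
	// these two values are identical.
	a := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	b := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(a == b)      // true: UUID is a comparable [16]byte array
	fmt.Println(a.Version()) // VERSION_5
	fmt.Println(a.Variant()) // RFC4122

	// NewMD5 is the Version 3 analogue; both delegate to NewHash.
	fmt.Println(uuid.NewMD5(uuid.NameSpaceURL, []byte("https://example.com")))
}
```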
-func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. 
+func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index 24b78edc90..fd38eb8830 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -1,12 +1,12 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. -func getHardwareInterface(name string) (string, []byte) { return "", nil } +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go index 0cbbcddbd6..39db578a85 100644 --- a/vendor/github.com/google/uuid/node_net.go +++ b/vendor/github.com/google/uuid/node_net.go @@ -1,33 +1,33 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. 
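`node.go` above determines which 6 bytes end up in the node field of time-based UUIDs. A sketch, assuming `NewUUID` (the Version 1 constructor, defined elsewhere in this package) is available:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Pin the node ID instead of deriving it from a hardware address;
	// only the first 6 bytes are used, and fewer than 6 is rejected.
	ok := uuid.SetNodeID([]byte{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01})
	fmt.Println(ok, uuid.NodeInterface()) // true user

	// Version 1 UUIDs embed that node ID in their last 6 bytes.
	u, err := uuid.NewUUID()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", u.NodeID()) // de ad be ef 00 01
}
```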
+func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go index d7fcbf2865..6b0afbf00c 100644 --- a/vendor/github.com/google/uuid/null.go +++ b/vendor/github.com/google/uuid/null.go @@ -1,118 +1,118 @@ -// Copyright 2021 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "database/sql/driver" - "encoding/json" - "fmt" -) - -var jsonNull = []byte("null") - -// NullUUID represents a UUID that may be null. -// NullUUID implements the SQL driver.Scanner interface so -// it can be used as a scan destination: -// -// var u uuid.NullUUID -// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) -// ... -// if u.Valid { -// // use u.UUID -// } else { -// // NULL value -// } -// -type NullUUID struct { - UUID UUID - Valid bool // Valid is true if UUID is not NULL -} - -// Scan implements the SQL driver.Scanner interface. -func (nu *NullUUID) Scan(value interface{}) error { - if value == nil { - nu.UUID, nu.Valid = Nil, false - return nil - } - - err := nu.UUID.Scan(value) - if err != nil { - nu.Valid = false - return err - } - - nu.Valid = true - return nil -} - -// Value implements the driver Valuer interface. -func (nu NullUUID) Value() (driver.Value, error) { - if !nu.Valid { - return nil, nil - } - // Delegate to UUID Value function - return nu.UUID.Value() -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (nu NullUUID) MarshalBinary() ([]byte, error) { - if nu.Valid { - return nu.UUID[:], nil - } - - return []byte(nil), nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. -func (nu *NullUUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(nu.UUID[:], data) - nu.Valid = true - return nil -} - -// MarshalText implements encoding.TextMarshaler. -func (nu NullUUID) MarshalText() ([]byte, error) { - if nu.Valid { - return nu.UUID.MarshalText() - } - - return jsonNull, nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (nu *NullUUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - nu.Valid = false - return err - } - nu.UUID = id - nu.Valid = true - return nil -} - -// MarshalJSON implements json.Marshaler. -func (nu NullUUID) MarshalJSON() ([]byte, error) { - if nu.Valid { - return json.Marshal(nu.UUID) - } - - return jsonNull, nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (nu *NullUUID) UnmarshalJSON(data []byte) error { - if bytes.Equal(data, jsonNull) { - *nu = NullUUID{} - return nil // valid null UUID - } - err := json.Unmarshal(data, &nu.UUID) - nu.Valid = err == nil - return err -} +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. 
+// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go index 2e02ec06c0..4e16c73d40 100644 --- a/vendor/github.com/google/uuid/sql.go +++ b/vendor/github.com/google/uuid/sql.go @@ -1,59 +1,59 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently. -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. 
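`null.go` and the `Scan`/`Value` plumbing above combine so that a nullable UUID column round-trips cleanly. This sketch exercises both paths without a real database; the literal UUID is arbitrary:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	var nu uuid.NullUUID

	// Scan accepts the same representations as UUID.Scan; a nil source
	// leaves Valid=false, which is how SQL NULL is reported.
	if err := nu.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		panic(err)
	}
	fmt.Println(nu.Valid, nu.UUID) // true 6ba7b810-...

	// Invalid values marshal to JSON null rather than a zero UUID.
	nu = uuid.NullUUID{}
	out, _ := json.Marshal(nu)
	fmt.Println(string(out)) // null
}
```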
-func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06cdc8..4f622c24d7 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -1,123 +1,123 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. 
-type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. 
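To see the timestamp and clock-sequence accessors above in action, assuming the Version 1 constructor `NewUUID` from this package:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewUUID() // Version 1: embeds timestamp + clock sequence
	if err != nil {
		panic(err)
	}

	// Time() reassembles the 60-bit timestamp (100 ns ticks since
	// 15 Oct 1582); UnixTime() rebases it onto the Unix epoch.
	sec, nsec := u.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec)) // roughly time.Now()
	fmt.Println(u.ClockSequence())    // the 14-bit clock sequence
}
```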
+func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go index 5ea6c73780..99d2ec2317 100644 --- a/vendor/github.com/google/uuid/util.go +++ b/vendor/github.com/google/uuid/util.go @@ -1,43 +1,43 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. -func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. 
+var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207aeb6..7f9c99e107 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -1,294 +1,294 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" - "sync" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -const randPoolSize = 16 * 16 - -var ( - rander = rand.Reader // random function - poolEnabled = false - poolMu sync.Mutex - poolPos = randPoolSize // protected with poolMu - pool [randPoolSize]byte // protected with poolMu -) - -type invalidLengthError struct{ len int } - -func (err invalidLengthError) Error() string { - return fmt.Sprintf("invalid UUID length: %d", err.len) -} - -// IsInvalidLengthError is matcher function for custom error invalidLengthError -func IsInvalidLengthError(err error) bool { - _, ok := err.(invalidLengthError) - return ok -} - -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
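The four encodings listed in the `Parse` documentation can be exercised directly; all decode to the same 16-byte value, and length errors are distinguishable from format errors:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	for _, s := range []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",          // canonical
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // RFC 2141 URN
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",        // Microsoft braces
		"6ba7b8109dad11d180b400c04fd430c8",              // raw hex
	} {
		u, err := uuid.Parse(s)
		fmt.Println(u, err) // same UUID, nil error, for all four
	}

	// A 10-byte input matches no known form, so the error is an
	// invalidLengthError rather than a generic format error.
	_, err := uuid.Parse("not-a-uuid")
	fmt.Println(uuid.IsInvalidLengthError(err)) // true
}
```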
-func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(s)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, invalidLengthError{len(b)} - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
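And the corresponding encoders: `String` and `URN`, plus the `Version`/`Variant` accessors. The literal below happens to be `NameSpaceDNS`, chosen only because it is a well-known value:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// MustParse panics on bad input, so it suits package-level vars.
	u := uuid.MustParse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")

	fmt.Println(u.String())  // 6ba7b810-9dad-11d1-80b4-00c04fd430c8
	fmt.Println(u.URN())     // urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8
	fmt.Println(u.Version()) // VERSION_1
	fmt.Println(u.Variant()) // RFC4122
}
```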
-func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} - -// EnableRandPool enables internal randomness pool used for Random -// (Version 4) UUID generation. The pool contains random bytes read from -// the random number generator on demand in batches. Enabling the pool -// may improve the UUID generation throughput significantly. -// -// Since the pool is stored on the Go heap, this feature may be a bad fit -// for security sensitive applications. -// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. -func EnableRandPool() { - poolEnabled = true -} - -// DisableRandPool disables the randomness pool if it was previously -// enabled with EnableRandPool. -// -// Both EnableRandPool and DisableRandPool are not thread-safe and should -// only be called when there is no possibility that New or any other -// UUID Version 4 generation function will be called concurrently. -func DisableRandPool() { - poolEnabled = false - defer poolMu.Unlock() - poolMu.Lock() - poolPos = randPoolSize -} +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. 
+) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. 
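`MustParse` is intended for literals known to be valid at compile time, typically package-level variables; a minimal sketch (the literal here is the well-known DNS namespace UUID):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

// A malformed literal panics at program start rather than surfacing
// as an error at first use.
var namespaceDNS = uuid.MustParse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")

func main() {
	fmt.Println(namespaceDNS)
}
```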
+func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. 
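Given the thread-safety caveat in the doc comments above, the randomness pool is typically switched on once during startup, before any goroutine generates UUIDs; a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Enable the pool before any concurrent calls to New: per the doc
	// comments above, EnableRandPool/DisableRandPool are not thread-safe.
	uuid.EnableRandPool()

	for i := 0; i < 4; i++ {
		fmt.Println(uuid.New()) // served from the batched randomness pool
	}
}
```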
+func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go index 463109629e..29ded56149 100644 --- a/vendor/github.com/google/uuid/version1.go +++ b/vendor/github.com/google/uuid/version1.go @@ -1,44 +1,44 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - copy(uuid[10:], nodeID[:]) - nodeMu.Unlock() - - return uuid, nil -} +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index 7697802e4d..aad9788819 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -1,76 +1,76 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "io" - -// New creates a new random UUID or panics. 
New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewString creates a new random UUID and returns it as a string or panics. -// NewString is equivalent to the expression -// -// uuid.New().String() -func NewString() string { - return Must(NewRandom()).String() -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// Uses the randomness pool if it was enabled with EnableRandPool. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() (UUID, error) { - if !poolEnabled { - return NewRandomFromReader(rander) - } - return newRandomFromPool() -} - -// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. -func NewRandomFromReader(r io.Reader) (UUID, error) { - var uuid UUID - _, err := io.ReadFull(r, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} - -func newRandomFromPool() (UUID, error) { - var uuid UUID - poolMu.Lock() - if poolPos == randPoolSize { - _, err := io.ReadFull(rander, pool[:]) - if err != nil { - poolMu.Unlock() - return Nil, err - } - poolPos = 0 - } - copy(uuid[:], pool[poolPos:(poolPos+16)]) - poolPos += 16 - poolMu.Unlock() - - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// Uses the randomness pool if it was enabled with EnableRandPool. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. 
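`NewRandomFromReader`, documented above, is handy for reproducible UUIDs in tests; a sketch feeding a fixed 16-byte stream (the input bytes here are arbitrary):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// A constant source yields a constant UUID; the version and variant
	// bits are still forced, so the result is a valid Version 4 UUID.
	src := bytes.NewReader([]byte("0123456789abcdef")) // exactly 16 bytes
	u, err := uuid.NewRandomFromReader(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(u)
}
```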
+func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/joho/godotenv/.gitignore b/vendor/github.com/joho/godotenv/.gitignore index e43b0f9889..6e8821bcc5 100644 --- a/vendor/github.com/joho/godotenv/.gitignore +++ b/vendor/github.com/joho/godotenv/.gitignore @@ -1 +1 @@ -.DS_Store +.DS_Store diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE index e7ddd51be9..ea4606511b 100644 --- a/vendor/github.com/joho/godotenv/LICENCE +++ b/vendor/github.com/joho/godotenv/LICENCE @@ -1,23 +1,23 @@ -Copyright (c) 2013 John Barton - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - +Copyright (c) 2013 John Barton + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/vendor/github.com/joho/godotenv/README.md b/vendor/github.com/joho/godotenv/README.md index bfbe66a0e9..1404043935 100644 --- a/vendor/github.com/joho/godotenv/README.md +++ b/vendor/github.com/joho/godotenv/README.md @@ -1,202 +1,202 @@ -# GoDotEnv ![CI](https://github.com/joho/godotenv/workflows/CI/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/joho/godotenv)](https://goreportcard.com/report/github.com/joho/godotenv) - -A Go (golang) port of the Ruby [dotenv](https://github.com/bkeepers/dotenv) project (which loads env vars from a .env file). - -From the original Library: - -> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. -> -> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped. - -It can be used as a library (for loading in env for your own daemons etc.) or as a bin command. - -There is test coverage and CI for both linuxish and Windows environments, but I make no guarantees about the bin version working on Windows. - -## Installation - -As a library - -```shell -go get github.com/joho/godotenv -``` - -or if you want to use it as a bin command - -go >= 1.17 -```shell -go install github.com/joho/godotenv/cmd/godotenv@latest -``` - -go < 1.17 -```shell -go get github.com/joho/godotenv/cmd/godotenv -``` - -## Usage - -Add your application configuration to your `.env` file in the root of your project: - -```shell -S3_BUCKET=YOURS3BUCKET -SECRET_KEY=YOURSECRETKEYGOESHERE -``` - -Then in your Go app you can do something like - -```go -package main - -import ( - "log" - "os" - - "github.com/joho/godotenv" -) - -func main() { - err := godotenv.Load() - if err != nil { - log.Fatal("Error loading .env file") - } - - s3Bucket := os.Getenv("S3_BUCKET") - secretKey := os.Getenv("SECRET_KEY") - - // now do something with s3 or whatever -} -``` - -If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import - -```go -import _ "github.com/joho/godotenv/autoload" -``` - -While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit - -```go -godotenv.Load("somerandomfile") -godotenv.Load("filenumberone.env", "filenumbertwo.env") -``` - -If you want to be really fancy with your env file you can do comments and exports (below is a valid env file) - -```shell -# I am a comment and that is OK -SOME_VAR=someval -FOO=BAR # comments at line end are OK too -export BAR=BAZ -``` - -Or finally you can do YAML(ish) style - -```yaml -FOO: bar -BAR: baz -``` - -as a final aside, if you don't want godotenv munging your env you can just get a map back instead - -```go -var myEnv map[string]string -myEnv, err := godotenv.Read() - -s3Bucket := myEnv["S3_BUCKET"] -``` - -... or from an `io.Reader` instead of a local file - -```go -reader := getRemoteFile() -myEnv, err := godotenv.Parse(reader) -``` - -... or from a `string` if you so desire - -```go -content := getRemoteFileContent() -myEnv, err := godotenv.Unmarshal(content) -``` - -### Precedence & Conventions - -Existing envs take precedence of envs that are loaded later. 
- -The [convention](https://github.com/bkeepers/dotenv#what-other-env-files-can-i-use) -for managing multiple environments (i.e. development, test, production) -is to create an env named `{YOURAPP}_ENV` and load envs in this order: - -```go -env := os.Getenv("FOO_ENV") -if "" == env { - env = "development" -} - -godotenv.Load(".env." + env + ".local") -if "test" != env { - godotenv.Load(".env.local") -} -godotenv.Load(".env." + env) -godotenv.Load() // The Original .env -``` - -If you need to, you can also use `godotenv.Overload()` to defy this convention -and overwrite existing envs instead of only supplanting them. Use with caution. - -### Command Mode - -Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH` - -``` -godotenv -f /some/path/to/.env some_command with some args -``` - -If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD` - -By default, it won't override existing environment variables; you can do that with the `-o` flag. - -### Writing Env Files - -Godotenv can also write a map representing the environment to a correctly-formatted and escaped file - -```go -env, err := godotenv.Unmarshal("KEY=value") -err := godotenv.Write(env, "./.env") -``` - -... or to a string - -```go -env, err := godotenv.Unmarshal("KEY=value") -content, err := godotenv.Marshal(env) -``` - -## Contributing - -Contributions are welcome, but with some caveats. - -This library has been declared feature complete (see [#182](https://github.com/joho/godotenv/issues/182) for background) and will not be accepting issues or pull requests adding new functionality or breaking the library API. - -Contributions would be gladly accepted that: - -* bring this library's parsing into closer compatibility with the mainline dotenv implementations, in particular [Ruby's dotenv](https://github.com/bkeepers/dotenv) and [Node.js' dotenv](https://github.com/motdotla/dotenv) -* keep the library up to date with the go ecosystem (ie CI bumps, documentation changes, changes in the core libraries) -* bug fixes for use cases that pertain to the library's purpose of easing development of codebases deployed into twelve factor environments - -*code changes without tests and references to peer dotenv implementations will not be accepted* - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Added some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. Create new Pull Request - -## Releases - -Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`. - -Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1` - -## Who? - -The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library. +# GoDotEnv ![CI](https://github.com/joho/godotenv/workflows/CI/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/joho/godotenv)](https://goreportcard.com/report/github.com/joho/godotenv) + +A Go (golang) port of the Ruby [dotenv](https://github.com/bkeepers/dotenv) project (which loads env vars from a .env file). + +From the original Library: + +> Storing configuration in the environment is one of the tenets of a twelve-factor app. 
Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables. +> +> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped. + +It can be used as a library (for loading in env for your own daemons etc.) or as a bin command. + +There is test coverage and CI for both linuxish and Windows environments, but I make no guarantees about the bin version working on Windows. + +## Installation + +As a library + +```shell +go get github.com/joho/godotenv +``` + +or if you want to use it as a bin command + +go >= 1.17 +```shell +go install github.com/joho/godotenv/cmd/godotenv@latest +``` + +go < 1.17 +```shell +go get github.com/joho/godotenv/cmd/godotenv +``` + +## Usage + +Add your application configuration to your `.env` file in the root of your project: + +```shell +S3_BUCKET=YOURS3BUCKET +SECRET_KEY=YOURSECRETKEYGOESHERE +``` + +Then in your Go app you can do something like + +```go +package main + +import ( + "log" + "os" + + "github.com/joho/godotenv" +) + +func main() { + err := godotenv.Load() + if err != nil { + log.Fatal("Error loading .env file") + } + + s3Bucket := os.Getenv("S3_BUCKET") + secretKey := os.Getenv("SECRET_KEY") + + // now do something with s3 or whatever +} +``` + +If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import + +```go +import _ "github.com/joho/godotenv/autoload" +``` + +While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit + +```go +godotenv.Load("somerandomfile") +godotenv.Load("filenumberone.env", "filenumbertwo.env") +``` + +If you want to be really fancy with your env file you can do comments and exports (below is a valid env file) + +```shell +# I am a comment and that is OK +SOME_VAR=someval +FOO=BAR # comments at line end are OK too +export BAR=BAZ +``` + +Or finally you can do YAML(ish) style + +```yaml +FOO: bar +BAR: baz +``` + +as a final aside, if you don't want godotenv munging your env you can just get a map back instead + +```go +var myEnv map[string]string +myEnv, err := godotenv.Read() + +s3Bucket := myEnv["S3_BUCKET"] +``` + +... or from an `io.Reader` instead of a local file + +```go +reader := getRemoteFile() +myEnv, err := godotenv.Parse(reader) +``` + +... or from a `string` if you so desire + +```go +content := getRemoteFileContent() +myEnv, err := godotenv.Unmarshal(content) +``` + +### Precedence & Conventions + +Existing envs take precedence of envs that are loaded later. + +The [convention](https://github.com/bkeepers/dotenv#what-other-env-files-can-i-use) +for managing multiple environments (i.e. development, test, production) +is to create an env named `{YOURAPP}_ENV` and load envs in this order: + +```go +env := os.Getenv("FOO_ENV") +if "" == env { + env = "development" +} + +godotenv.Load(".env." + env + ".local") +if "test" != env { + godotenv.Load(".env.local") +} +godotenv.Load(".env." + env) +godotenv.Load() // The Original .env +``` + +If you need to, you can also use `godotenv.Overload()` to defy this convention +and overwrite existing envs instead of only supplanting them. Use with caution. 
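A minimal sketch of the `Load`/`Overload` difference, assuming a hypothetical `.env` in the working directory that sets `PORT="8080"`:

```go
package main

import (
	"fmt"
	"os"

	"github.com/joho/godotenv"
)

func main() {
	os.Setenv("PORT", "3000")

	_ = godotenv.Load()            // keeps the pre-existing value
	fmt.Println(os.Getenv("PORT")) // 3000

	_ = godotenv.Overload()        // overwrites it from .env
	fmt.Println(os.Getenv("PORT")) // 8080
}
```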
+ +### Command Mode + +Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH` + +``` +godotenv -f /some/path/to/.env some_command with some args +``` + +If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD` + +By default, it won't override existing environment variables; you can do that with the `-o` flag. + +### Writing Env Files + +Godotenv can also write a map representing the environment to a correctly-formatted and escaped file + +```go +env, err := godotenv.Unmarshal("KEY=value") +err := godotenv.Write(env, "./.env") +``` + +... or to a string + +```go +env, err := godotenv.Unmarshal("KEY=value") +content, err := godotenv.Marshal(env) +``` + +## Contributing + +Contributions are welcome, but with some caveats. + +This library has been declared feature complete (see [#182](https://github.com/joho/godotenv/issues/182) for background) and will not be accepting issues or pull requests adding new functionality or breaking the library API. + +Contributions would be gladly accepted that: + +* bring this library's parsing into closer compatibility with the mainline dotenv implementations, in particular [Ruby's dotenv](https://github.com/bkeepers/dotenv) and [Node.js' dotenv](https://github.com/motdotla/dotenv) +* keep the library up to date with the go ecosystem (ie CI bumps, documentation changes, changes in the core libraries) +* bug fixes for use cases that pertain to the library's purpose of easing development of codebases deployed into twelve factor environments + +*code changes without tests and references to peer dotenv implementations will not be accepted* + +1. Fork it +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Commit your changes (`git commit -am 'Added some feature'`) +4. Push to the branch (`git push origin my-new-feature`) +5. Create new Pull Request + +## Releases + +Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`. + +Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1` + +## Who? + +The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library. diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go index 61b0ebbafa..ec3406814c 100644 --- a/vendor/github.com/joho/godotenv/godotenv.go +++ b/vendor/github.com/joho/godotenv/godotenv.go @@ -1,228 +1,228 @@ -// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) -// -// Examples/readme can be found on the GitHub page at https://github.com/joho/godotenv -// -// The TL;DR is that you make a .env file that looks something like -// -// SOME_ENV_VAR=somevalue -// -// and then in your go code you can call -// -// godotenv.Load() -// -// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") -package godotenv - -import ( - "bytes" - "fmt" - "io" - "os" - "os/exec" - "sort" - "strconv" - "strings" -) - -const doubleQuoteSpecialChars = "\\\n\r\"!$`" - -// Parse reads an env file from io.Reader, returning a map of keys and values. 
-func Parse(r io.Reader) (map[string]string, error) { - var buf bytes.Buffer - _, err := io.Copy(&buf, r) - if err != nil { - return nil, err - } - - return UnmarshalBytes(buf.Bytes()) -} - -// Load will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main). -// -// If you call Load without any args it will default to loading .env in the current path. -// -// You can otherwise tell it which files to load (there can be more than one) like: -// -// godotenv.Load("fileone", "filetwo") -// -// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults. -func Load(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, false) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Overload will read your env file(s) and load them into ENV for this process. -// -// Call this function as close as possible to the start of your program (ideally in main). -// -// If you call Overload without any args it will default to loading .env in the current path. -// -// You can otherwise tell it which files to load (there can be more than one) like: -// -// godotenv.Overload("fileone", "filetwo") -// -// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars. -func Overload(filenames ...string) (err error) { - filenames = filenamesOrDefault(filenames) - - for _, filename := range filenames { - err = loadFile(filename, true) - if err != nil { - return // return early on a spazout - } - } - return -} - -// Read all env (with same file loading semantics as Load) but return values as -// a map rather than automatically writing values into env -func Read(filenames ...string) (envMap map[string]string, err error) { - filenames = filenamesOrDefault(filenames) - envMap = make(map[string]string) - - for _, filename := range filenames { - individualEnvMap, individualErr := readFile(filename) - - if individualErr != nil { - err = individualErr - return // return early on a spazout - } - - for key, value := range individualEnvMap { - envMap[key] = value - } - } - - return -} - -// Unmarshal reads an env file from a string, returning a map of keys and values. -func Unmarshal(str string) (envMap map[string]string, err error) { - return UnmarshalBytes([]byte(str)) -} - -// UnmarshalBytes parses env file from byte slice of chars, returning a map of keys and values. -func UnmarshalBytes(src []byte) (map[string]string, error) { - out := make(map[string]string) - err := parseBytes(src, out) - - return out, err -} - -// Exec loads env vars from the specified filenames (empty map falls back to default) -// then executes the cmd specified. -// -// Simply hooks up os.Stdin/err/out to the command and calls Run(). -// -// If you want more fine grained control over your command it's recommended -// that you use `Load()`, `Overload()` or `Read()` and the `os/exec` package yourself. -func Exec(filenames []string, cmd string, cmdArgs []string, overload bool) error { - op := Load - if overload { - op = Overload - } - if err := op(filenames...); err != nil { - return err - } - - command := exec.Command(cmd, cmdArgs...) 
- command.Stdin = os.Stdin - command.Stdout = os.Stdout - command.Stderr = os.Stderr - return command.Run() -} - -// Write serializes the given environment and writes it to a file. -func Write(envMap map[string]string, filename string) error { - content, err := Marshal(envMap) - if err != nil { - return err - } - file, err := os.Create(filename) - if err != nil { - return err - } - defer file.Close() - _, err = file.WriteString(content + "\n") - if err != nil { - return err - } - return file.Sync() -} - -// Marshal outputs the given environment as a dotenv-formatted environment file. -// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. -func Marshal(envMap map[string]string) (string, error) { - lines := make([]string, 0, len(envMap)) - for k, v := range envMap { - if d, err := strconv.Atoi(v); err == nil { - lines = append(lines, fmt.Sprintf(`%s=%d`, k, d)) - } else { - lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) - } - } - sort.Strings(lines) - return strings.Join(lines, "\n"), nil -} - -func filenamesOrDefault(filenames []string) []string { - if len(filenames) == 0 { - return []string{".env"} - } - return filenames -} - -func loadFile(filename string, overload bool) error { - envMap, err := readFile(filename) - if err != nil { - return err - } - - currentEnv := map[string]bool{} - rawEnv := os.Environ() - for _, rawEnvLine := range rawEnv { - key := strings.Split(rawEnvLine, "=")[0] - currentEnv[key] = true - } - - for key, value := range envMap { - if !currentEnv[key] || overload { - _ = os.Setenv(key, value) - } - } - - return nil -} - -func readFile(filename string) (envMap map[string]string, err error) { - file, err := os.Open(filename) - if err != nil { - return - } - defer file.Close() - - return Parse(file) -} - -func doubleQuoteEscape(line string) string { - for _, c := range doubleQuoteSpecialChars { - toReplace := "\\" + string(c) - if c == '\n' { - toReplace = `\n` - } - if c == '\r' { - toReplace = `\r` - } - line = strings.Replace(line, string(c), toReplace, -1) - } - return line -} +// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) +// +// Examples/readme can be found on the GitHub page at https://github.com/joho/godotenv +// +// The TL;DR is that you make a .env file that looks something like +// +// SOME_ENV_VAR=somevalue +// +// and then in your go code you can call +// +// godotenv.Load() +// +// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") +package godotenv + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "sort" + "strconv" + "strings" +) + +const doubleQuoteSpecialChars = "\\\n\r\"!$`" + +// Parse reads an env file from io.Reader, returning a map of keys and values. +func Parse(r io.Reader) (map[string]string, error) { + var buf bytes.Buffer + _, err := io.Copy(&buf, r) + if err != nil { + return nil, err + } + + return UnmarshalBytes(buf.Bytes()) +} + +// Load will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main). +// +// If you call Load without any args it will default to loading .env in the current path. +// +// You can otherwise tell it which files to load (there can be more than one) like: +// +// godotenv.Load("fileone", "filetwo") +// +// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults. 
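One consequence of the no-override rule described above is worth spelling out: when several files are passed to `Load`, earlier files win. A sketch with two hypothetical files, `local.env` (containing `PORT=9090`) and `base.env` (containing `PORT=8080`):

```go
package main

import (
	"fmt"
	"os"

	"github.com/joho/godotenv"
)

func main() {
	if err := godotenv.Load("local.env", "base.env"); err != nil {
		fmt.Println("load failed:", err)
		return
	}
	// local.env set PORT first, so base.env could not override it.
	fmt.Println(os.Getenv("PORT")) // 9090
}
```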
+func Load(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, false) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Overload will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main). +// +// If you call Overload without any args it will default to loading .env in the current path. +// +// You can otherwise tell it which files to load (there can be more than one) like: +// +// godotenv.Overload("fileone", "filetwo") +// +// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars. +func Overload(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, true) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (envMap map[string]string, err error) { + filenames = filenamesOrDefault(filenames) + envMap = make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := readFile(filename) + + if individualErr != nil { + err = individualErr + return // return early on a spazout + } + + for key, value := range individualEnvMap { + envMap[key] = value + } + } + + return +} + +// Unmarshal reads an env file from a string, returning a map of keys and values. +func Unmarshal(str string) (envMap map[string]string, err error) { + return UnmarshalBytes([]byte(str)) +} + +// UnmarshalBytes parses env file from byte slice of chars, returning a map of keys and values. +func UnmarshalBytes(src []byte) (map[string]string, error) { + out := make(map[string]string) + err := parseBytes(src, out) + + return out, err +} + +// Exec loads env vars from the specified filenames (empty map falls back to default) +// then executes the cmd specified. +// +// Simply hooks up os.Stdin/err/out to the command and calls Run(). +// +// If you want more fine grained control over your command it's recommended +// that you use `Load()`, `Overload()` or `Read()` and the `os/exec` package yourself. +func Exec(filenames []string, cmd string, cmdArgs []string, overload bool) error { + op := Load + if overload { + op = Overload + } + if err := op(filenames...); err != nil { + return err + } + + command := exec.Command(cmd, cmdArgs...) + command.Stdin = os.Stdin + command.Stdout = os.Stdout + command.Stderr = os.Stderr + return command.Run() +} + +// Write serializes the given environment and writes it to a file. +func Write(envMap map[string]string, filename string) error { + content, err := Marshal(envMap) + if err != nil { + return err + } + file, err := os.Create(filename) + if err != nil { + return err + } + defer file.Close() + _, err = file.WriteString(content + "\n") + if err != nil { + return err + } + return file.Sync() +} + +// Marshal outputs the given environment as a dotenv-formatted environment file. +// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped. 
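A sketch of the serialization rules described in the `Marshal` doc comment above, including the unquoted-integer case visible in the implementation that follows (the keys and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/joho/godotenv"
)

func main() {
	out, err := godotenv.Marshal(map[string]string{
		"NAME": "line1\nline2",
		"PORT": "8080",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
	// Output (keys sorted):
	// NAME="line1\nline2"   <- quoted, newline backslash-escaped
	// PORT=8080             <- integer values are written bare
}
```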
+func Marshal(envMap map[string]string) (string, error) { + lines := make([]string, 0, len(envMap)) + for k, v := range envMap { + if d, err := strconv.Atoi(v); err == nil { + lines = append(lines, fmt.Sprintf(`%s=%d`, k, d)) + } else { + lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v))) + } + } + sort.Strings(lines) + return strings.Join(lines, "\n"), nil +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename) + if err != nil { + return err + } + + currentEnv := map[string]bool{} + rawEnv := os.Environ() + for _, rawEnvLine := range rawEnv { + key := strings.Split(rawEnvLine, "=")[0] + currentEnv[key] = true + } + + for key, value := range envMap { + if !currentEnv[key] || overload { + _ = os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string) (envMap map[string]string, err error) { + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + + return Parse(file) +} + +func doubleQuoteEscape(line string) string { + for _, c := range doubleQuoteSpecialChars { + toReplace := "\\" + string(c) + if c == '\n' { + toReplace = `\n` + } + if c == '\r' { + toReplace = `\r` + } + line = strings.Replace(line, string(c), toReplace, -1) + } + return line +} diff --git a/vendor/github.com/joho/godotenv/parser.go b/vendor/github.com/joho/godotenv/parser.go index cc709af897..d4d3d91b40 100644 --- a/vendor/github.com/joho/godotenv/parser.go +++ b/vendor/github.com/joho/godotenv/parser.go @@ -1,271 +1,271 @@ -package godotenv - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strings" - "unicode" -) - -const ( - charComment = '#' - prefixSingleQuote = '\'' - prefixDoubleQuote = '"' - - exportPrefix = "export" -) - -func parseBytes(src []byte, out map[string]string) error { - src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) - cutset := src - for { - cutset = getStatementStart(cutset) - if cutset == nil { - // reached end of file - break - } - - key, left, err := locateKeyName(cutset) - if err != nil { - return err - } - - value, left, err := extractVarValue(left, out) - if err != nil { - return err - } - - out[key] = value - cutset = left - } - - return nil -} - -// getStatementPosition returns position of statement begin. -// -// It skips any comment line or non-whitespace character. 
-func getStatementStart(src []byte) []byte { - pos := indexOfNonSpaceChar(src) - if pos == -1 { - return nil - } - - src = src[pos:] - if src[0] != charComment { - return src - } - - // skip comment section - pos = bytes.IndexFunc(src, isCharFunc('\n')) - if pos == -1 { - return nil - } - - return getStatementStart(src[pos:]) -} - -// locateKeyName locates and parses key name and returns rest of slice -func locateKeyName(src []byte) (key string, cutset []byte, err error) { - // trim "export" and space at beginning - src = bytes.TrimLeftFunc(src, isSpace) - if bytes.HasPrefix(src, []byte(exportPrefix)) { - trimmed := bytes.TrimPrefix(src, []byte(exportPrefix)) - if bytes.IndexFunc(trimmed, isSpace) == 0 { - src = bytes.TrimLeftFunc(trimmed, isSpace) - } - } - - // locate key name end and validate it in single loop - offset := 0 -loop: - for i, char := range src { - rchar := rune(char) - if isSpace(rchar) { - continue - } - - switch char { - case '=', ':': - // library also supports yaml-style value declaration - key = string(src[0:i]) - offset = i + 1 - break loop - case '_': - default: - // variable name should match [A-Za-z0-9_.] - if unicode.IsLetter(rchar) || unicode.IsNumber(rchar) || rchar == '.' { - continue - } - - return "", nil, fmt.Errorf( - `unexpected character %q in variable name near %q`, - string(char), string(src)) - } - } - - if len(src) == 0 { - return "", nil, errors.New("zero length string") - } - - // trim whitespace - key = strings.TrimRightFunc(key, unicode.IsSpace) - cutset = bytes.TrimLeftFunc(src[offset:], isSpace) - return key, cutset, nil -} - -// extractVarValue extracts variable value and returns rest of slice -func extractVarValue(src []byte, vars map[string]string) (value string, rest []byte, err error) { - quote, hasPrefix := hasQuotePrefix(src) - if !hasPrefix { - // unquoted value - read until end of line - endOfLine := bytes.IndexFunc(src, isLineEnd) - - // Hit EOF without a trailing newline - if endOfLine == -1 { - endOfLine = len(src) - - if endOfLine == 0 { - return "", nil, nil - } - } - - // Convert line to rune away to do accurate countback of runes - line := []rune(string(src[0:endOfLine])) - - // Assume end of line is end of var - endOfVar := len(line) - if endOfVar == 0 { - return "", src[endOfLine:], nil - } - - // Work backwards to check if the line ends in whitespace then - // a comment (ie asdasd # some comment) - for i := endOfVar - 1; i >= 0; i-- { - if line[i] == charComment && i > 0 { - if isSpace(line[i-1]) { - endOfVar = i - break - } - } - } - - trimmed := strings.TrimFunc(string(line[0:endOfVar]), isSpace) - - return expandVariables(trimmed, vars), src[endOfLine:], nil - } - - // lookup quoted string terminator - for i := 1; i < len(src); i++ { - if char := src[i]; char != quote { - continue - } - - // skip escaped quote symbol (\" or \', depends on quote) - if prevChar := src[i-1]; prevChar == '\\' { - continue - } - - // trim quotes - trimFunc := isCharFunc(rune(quote)) - value = string(bytes.TrimLeftFunc(bytes.TrimRightFunc(src[0:i], trimFunc), trimFunc)) - if quote == prefixDoubleQuote { - // unescape newlines for double quote (this is compat feature) - // and expand environment variables - value = expandVariables(expandEscapes(value), vars) - } - - return value, src[i+1:], nil - } - - // return formatted error if quoted string is not terminated - valEndIndex := bytes.IndexFunc(src, isCharFunc('\n')) - if valEndIndex == -1 { - valEndIndex = len(src) - } - - return "", nil, fmt.Errorf("unterminated quoted value %s", 
src[:valEndIndex]) -} - -func expandEscapes(str string) string { - out := escapeRegex.ReplaceAllStringFunc(str, func(match string) string { - c := strings.TrimPrefix(match, `\`) - switch c { - case "n": - return "\n" - case "r": - return "\r" - default: - return match - } - }) - return unescapeCharsRegex.ReplaceAllString(out, "$1") -} - -func indexOfNonSpaceChar(src []byte) int { - return bytes.IndexFunc(src, func(r rune) bool { - return !unicode.IsSpace(r) - }) -} - -// hasQuotePrefix reports whether charset starts with single or double quote and returns quote character -func hasQuotePrefix(src []byte) (prefix byte, isQuored bool) { - if len(src) == 0 { - return 0, false - } - - switch prefix := src[0]; prefix { - case prefixDoubleQuote, prefixSingleQuote: - return prefix, true - default: - return 0, false - } -} - -func isCharFunc(char rune) func(rune) bool { - return func(v rune) bool { - return v == char - } -} - -// isSpace reports whether the rune is a space character but not line break character -// -// this differs from unicode.IsSpace, which also applies line break as space -func isSpace(r rune) bool { - switch r { - case '\t', '\v', '\f', '\r', ' ', 0x85, 0xA0: - return true - } - return false -} - -func isLineEnd(r rune) bool { - if r == '\n' || r == '\r' { - return true - } - return false -} - -var ( - escapeRegex = regexp.MustCompile(`\\.`) - expandVarRegex = regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`) - unescapeCharsRegex = regexp.MustCompile(`\\([^$])`) -) - -func expandVariables(v string, m map[string]string) string { - return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string { - submatch := expandVarRegex.FindStringSubmatch(s) - - if submatch == nil { - return s - } - if submatch[1] == "\\" || submatch[2] == "(" { - return submatch[0][1:] - } else if submatch[4] != "" { - return m[submatch[4]] - } - return s - }) -} +package godotenv + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" + "unicode" +) + +const ( + charComment = '#' + prefixSingleQuote = '\'' + prefixDoubleQuote = '"' + + exportPrefix = "export" +) + +func parseBytes(src []byte, out map[string]string) error { + src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1) + cutset := src + for { + cutset = getStatementStart(cutset) + if cutset == nil { + // reached end of file + break + } + + key, left, err := locateKeyName(cutset) + if err != nil { + return err + } + + value, left, err := extractVarValue(left, out) + if err != nil { + return err + } + + out[key] = value + cutset = left + } + + return nil +} + +// getStatementPosition returns position of statement begin. +// +// It skips any comment line or non-whitespace character. 
+func getStatementStart(src []byte) []byte { + pos := indexOfNonSpaceChar(src) + if pos == -1 { + return nil + } + + src = src[pos:] + if src[0] != charComment { + return src + } + + // skip comment section + pos = bytes.IndexFunc(src, isCharFunc('\n')) + if pos == -1 { + return nil + } + + return getStatementStart(src[pos:]) +} + +// locateKeyName locates and parses key name and returns rest of slice +func locateKeyName(src []byte) (key string, cutset []byte, err error) { + // trim "export" and space at beginning + src = bytes.TrimLeftFunc(src, isSpace) + if bytes.HasPrefix(src, []byte(exportPrefix)) { + trimmed := bytes.TrimPrefix(src, []byte(exportPrefix)) + if bytes.IndexFunc(trimmed, isSpace) == 0 { + src = bytes.TrimLeftFunc(trimmed, isSpace) + } + } + + // locate key name end and validate it in single loop + offset := 0 +loop: + for i, char := range src { + rchar := rune(char) + if isSpace(rchar) { + continue + } + + switch char { + case '=', ':': + // library also supports yaml-style value declaration + key = string(src[0:i]) + offset = i + 1 + break loop + case '_': + default: + // variable name should match [A-Za-z0-9_.] + if unicode.IsLetter(rchar) || unicode.IsNumber(rchar) || rchar == '.' { + continue + } + + return "", nil, fmt.Errorf( + `unexpected character %q in variable name near %q`, + string(char), string(src)) + } + } + + if len(src) == 0 { + return "", nil, errors.New("zero length string") + } + + // trim whitespace + key = strings.TrimRightFunc(key, unicode.IsSpace) + cutset = bytes.TrimLeftFunc(src[offset:], isSpace) + return key, cutset, nil +} + +// extractVarValue extracts variable value and returns rest of slice +func extractVarValue(src []byte, vars map[string]string) (value string, rest []byte, err error) { + quote, hasPrefix := hasQuotePrefix(src) + if !hasPrefix { + // unquoted value - read until end of line + endOfLine := bytes.IndexFunc(src, isLineEnd) + + // Hit EOF without a trailing newline + if endOfLine == -1 { + endOfLine = len(src) + + if endOfLine == 0 { + return "", nil, nil + } + } + + // Convert line to rune away to do accurate countback of runes + line := []rune(string(src[0:endOfLine])) + + // Assume end of line is end of var + endOfVar := len(line) + if endOfVar == 0 { + return "", src[endOfLine:], nil + } + + // Work backwards to check if the line ends in whitespace then + // a comment (ie asdasd # some comment) + for i := endOfVar - 1; i >= 0; i-- { + if line[i] == charComment && i > 0 { + if isSpace(line[i-1]) { + endOfVar = i + break + } + } + } + + trimmed := strings.TrimFunc(string(line[0:endOfVar]), isSpace) + + return expandVariables(trimmed, vars), src[endOfLine:], nil + } + + // lookup quoted string terminator + for i := 1; i < len(src); i++ { + if char := src[i]; char != quote { + continue + } + + // skip escaped quote symbol (\" or \', depends on quote) + if prevChar := src[i-1]; prevChar == '\\' { + continue + } + + // trim quotes + trimFunc := isCharFunc(rune(quote)) + value = string(bytes.TrimLeftFunc(bytes.TrimRightFunc(src[0:i], trimFunc), trimFunc)) + if quote == prefixDoubleQuote { + // unescape newlines for double quote (this is compat feature) + // and expand environment variables + value = expandVariables(expandEscapes(value), vars) + } + + return value, src[i+1:], nil + } + + // return formatted error if quoted string is not terminated + valEndIndex := bytes.IndexFunc(src, isCharFunc('\n')) + if valEndIndex == -1 { + valEndIndex = len(src) + } + + return "", nil, fmt.Errorf("unterminated quoted value %s", 
src[:valEndIndex])
+}
+
+func expandEscapes(str string) string {
+	out := escapeRegex.ReplaceAllStringFunc(str, func(match string) string {
+		c := strings.TrimPrefix(match, `\`)
+		switch c {
+		case "n":
+			return "\n"
+		case "r":
+			return "\r"
+		default:
+			return match
+		}
+	})
+	return unescapeCharsRegex.ReplaceAllString(out, "$1")
+}
+
+func indexOfNonSpaceChar(src []byte) int {
+	return bytes.IndexFunc(src, func(r rune) bool {
+		return !unicode.IsSpace(r)
+	})
+}
+
+// hasQuotePrefix reports whether src starts with a single or double quote and returns the quote character
+func hasQuotePrefix(src []byte) (prefix byte, isQuoted bool) {
+	if len(src) == 0 {
+		return 0, false
+	}
+
+	switch prefix := src[0]; prefix {
+	case prefixDoubleQuote, prefixSingleQuote:
+		return prefix, true
+	default:
+		return 0, false
+	}
+}
+
+func isCharFunc(char rune) func(rune) bool {
+	return func(v rune) bool {
+		return v == char
+	}
+}
+
+// isSpace reports whether the rune is a space character but not a line break character
+//
+// this differs from unicode.IsSpace, which also treats line breaks as spaces
+func isSpace(r rune) bool {
+	switch r {
+	case '\t', '\v', '\f', '\r', ' ', 0x85, 0xA0:
+		return true
+	}
+	return false
+}
+
+func isLineEnd(r rune) bool {
+	if r == '\n' || r == '\r' {
+		return true
+	}
+	return false
+}
+
+var (
+	escapeRegex        = regexp.MustCompile(`\\.`)
+	expandVarRegex     = regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`)
+	unescapeCharsRegex = regexp.MustCompile(`\\([^$])`)
+)
+
+func expandVariables(v string, m map[string]string) string {
+	return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string {
+		submatch := expandVarRegex.FindStringSubmatch(s)
+
+		if submatch == nil {
+			return s
+		}
+		if submatch[1] == "\\" || submatch[2] == "(" {
+			return submatch[0][1:]
+		} else if submatch[4] != "" {
+			return m[submatch[4]]
+		}
+		return s
+	})
+}
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
index 87d5574777..5b20ee5b7b 100644
--- a/vendor/github.com/klauspost/compress/LICENSE
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -1,304 +1,304 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2019 Klaus Post. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------- - -Files: gzhttp/* - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 82882961a0..8370ac9f5b 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -1,989 +1,989 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Copyright (c) 2015 Klaus Post -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - NoCompression = 0 - BestSpeed = 1 - BestCompression = 9 - DefaultCompression = -1 - - // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman - // entropy encoding. This mode is useful in compressing data that has - // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) - // that lacks an entropy encoder. Compression gains are achieved when - // certain bytes in the input stream occur more frequently than others. - // - // Note that HuffmanOnly produces a compressed output that is - // RFC 1951 compliant. That is, any valid DEFLATE decompressor will - // continue to be able to decompress this output. - HuffmanOnly = -2 - ConstantCompression = HuffmanOnly // compatibility alias. - - logWindowSize = 15 - windowSize = 1 << logWindowSize - windowMask = windowSize - 1 - logMaxOffsetSize = 15 // Standard DEFLATE - minMatchLength = 4 // The smallest match that the compressor looks for - maxMatchLength = 258 // The longest match for the compressor - minOffsetSize = 1 // The shortest offset that makes any sense - - // The maximum number of tokens we will encode at the time. - // Smaller sizes usually creates less optimal blocks. - // Bigger can make context switching slow. - // We use this for levels 7-9, so we make it big. - maxFlateBlockTokens = 1 << 15 - maxStoreBlockSize = 65535 - hashBits = 17 // After 17 performance degrades - hashSize = 1 << hashBits - hashMask = (1 << hashBits) - 1 - hashShift = (hashBits + minMatchLength - 1) / minMatchLength - maxHashOffset = 1 << 28 - - skipNever = math.MaxInt32 - - debugDeflate = false -) - -type compressionLevel struct { - good, lazy, nice, chain, fastSkipHashing, level int -} - -// Compression levels have been rebalanced from zlib deflate defaults -// to give a bigger spread in speed and compression. -// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ -var levels = []compressionLevel{ - {}, // 0 - // Level 1-6 uses specialized algorithm - values not used - {0, 0, 0, 0, 0, 1}, - {0, 0, 0, 0, 0, 2}, - {0, 0, 0, 0, 0, 3}, - {0, 0, 0, 0, 0, 4}, - {0, 0, 0, 0, 0, 5}, - {0, 0, 0, 0, 0, 6}, - // Levels 7-9 use increasingly more lazy matching - // and increasingly stringent conditions for "good enough". - {8, 12, 16, 24, skipNever, 7}, - {16, 30, 40, 64, skipNever, 8}, - {32, 258, 258, 1024, skipNever, 9}, -} - -// advancedState contains state for the advanced levels, with bigger hash tables, etc. 
-type advancedState struct { - // deflate state - length int - offset int - maxInsertIndex int - chainHead int - hashOffset int - - ii uint16 // position of last match, intended to overflow to reset. - - // input window: unprocessed data is window[index:windowEnd] - index int - estBitsPerByte int - hashMatch [maxMatchLength + minMatchLength]uint32 - - // Input hash chains - // hashHead[hashValue] contains the largest inputIndex with the specified hash value - // If hashHead[hashValue] is within the current window, then - // hashPrev[hashHead[hashValue] & windowMask] contains the previous index - // with the same hash value. - hashHead [hashSize]uint32 - hashPrev [windowSize]uint32 -} - -type compressor struct { - compressionLevel - - h *huffmanEncoder - w *huffmanBitWriter - - // compression algorithm - fill func(*compressor, []byte) int // copy data to window - step func(*compressor) // process window - - window []byte - windowEnd int - blockStart int // window index where current tokens start - err error - - // queued output tokens - tokens tokens - fast fastEnc - state *advancedState - - sync bool // requesting flush - byteAvailable bool // if true, still need to process window[index-1]. -} - -func (d *compressor) fillDeflate(b []byte) int { - s := d.state - if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { - // shift the window by windowSize - //copy(d.window[:], d.window[windowSize:2*windowSize]) - *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) - s.index -= windowSize - d.windowEnd -= windowSize - if d.blockStart >= windowSize { - d.blockStart -= windowSize - } else { - d.blockStart = math.MaxInt32 - } - s.hashOffset += windowSize - if s.hashOffset > maxHashOffset { - delta := s.hashOffset - 1 - s.hashOffset -= delta - s.chainHead -= delta - // Iterate over slices instead of arrays to avoid copying - // the entire table onto the stack (Issue #18625). - for i, v := range s.hashPrev[:] { - if int(v) > delta { - s.hashPrev[i] = uint32(int(v) - delta) - } else { - s.hashPrev[i] = 0 - } - } - for i, v := range s.hashHead[:] { - if int(v) > delta { - s.hashHead[i] = uint32(int(v) - delta) - } else { - s.hashHead[i] = 0 - } - } - } - } - n := copy(d.window[d.windowEnd:], b) - d.windowEnd += n - return n -} - -func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - var window []byte - if d.blockStart <= index { - window = d.window[d.blockStart:index] - } - d.blockStart = index - //d.w.writeBlock(tok, eof, window) - d.w.writeBlockDynamic(tok, eof, window, d.sync) - return d.w.err - } - return nil -} - -// writeBlockSkip writes the current block and uses the number of tokens -// to determine if the block should be stored on no matches, or -// only huffman encoded. -func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - if d.blockStart <= index { - window := d.window[d.blockStart:index] - // If we removed less than a 64th of all literals - // we huffman compress the block. - if int(tok.n) > len(window)-int(tok.n>>6) { - d.w.writeBlockHuff(eof, window, d.sync) - } else { - // Write a dynamic huffman block. - d.w.writeBlockDynamic(tok, eof, window, d.sync) - } - } else { - d.w.writeBlock(tok, eof, nil) - } - d.blockStart = index - return d.w.err - } - return nil -} - -// fillWindow will fill the current window with the supplied -// dictionary and calculate all hashes. -// This is much faster than doing a full encode. -// Should only be used after a start/reset. 
-func (d *compressor) fillWindow(b []byte) { - // Do not fill window if we are in store-only or huffman mode. - if d.level <= 0 { - return - } - if d.fast != nil { - // encode the last data, but discard the result - if len(b) > maxMatchOffset { - b = b[len(b)-maxMatchOffset:] - } - d.fast.Encode(&d.tokens, b) - d.tokens.Reset() - return - } - s := d.state - // If we are given too much, cut it. - if len(b) > windowSize { - b = b[len(b)-windowSize:] - } - // Add all to window. - n := copy(d.window[d.windowEnd:], b) - - // Calculate 256 hashes at the time (more L1 cache hits) - loops := (n + 256 - minMatchLength) / 256 - for j := 0; j < loops; j++ { - startindex := j * 256 - end := startindex + 256 + minMatchLength - 1 - if end > n { - end = n - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - - if dstSize <= 0 { - continue - } - - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - } - // Update window information. - d.windowEnd += n - s.index = n -} - -// Try to find a match starting at index whose length is greater than prevSize. -// We only look at chainCount possibilities before giving up. -// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead -func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { - minMatchLook := maxMatchLength - if lookahead < minMatchLook { - minMatchLook = lookahead - } - - win := d.window[0 : pos+minMatchLook] - - // We quit when we get a match that's at least nice long - nice := len(win) - pos - if d.nice < nice { - nice = d.nice - } - - // If we've got a match that's good enough, only look in 1/4 the chain. - tries := d.chain - length = minMatchLength - 1 - - wEnd := win[pos+length] - wPos := win[pos:] - minIndex := pos - windowSize - if minIndex < 0 { - minIndex = 0 - } - offset = 0 - - if d.chain < 100 { - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLen(win[i:i+minMatchLook], wPos) - if n > length { - length = n - offset = pos - i - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break - } - wEnd = win[pos+n] - } - } - if i <= minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. - break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex { - break - } - } - return - } - - // Minimum gain to accept a match. - cGain := 4 - - // Some like it higher (CSV), some like it lower (JSON) - const baseCost = 3 - // Base is 4 bytes at with an additional cost. - // Matches must be better than this. - - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLen(win[i:i+minMatchLook], wPos) - if n > length { - // Calculate gain. 
Estimate - newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) - - //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) - if newGain > cGain { - length = n - offset = pos - i - cGain = newGain - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break - } - wEnd = win[pos+n] - } - } - } - if i <= minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. - break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex { - break - } - } - return -} - -func (d *compressor) writeStoredBlock(buf []byte) error { - if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { - return d.w.err - } - d.w.writeBytes(buf) - return d.w.err -} - -// hash4 returns a hash representation of the first 4 bytes -// of the supplied slice. -// The caller must ensure that len(b) >= 4. -func hash4(b []byte) uint32 { - return hash4u(binary.LittleEndian.Uint32(b), hashBits) -} - -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4u(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> (32 - h) -} - -// bulkHash4 will compute hashes using the same -// algorithm as hash4 -func bulkHash4(b []byte, dst []uint32) { - if len(b) < 4 { - return - } - hb := binary.LittleEndian.Uint32(b) - - dst[0] = hash4u(hb, hashBits) - end := len(b) - 4 + 1 - for i := 1; i < end; i++ { - hb = (hb >> 8) | uint32(b[i+3])<<24 - dst[i] = hash4u(hb, hashBits) - } -} - -func (d *compressor) initDeflate() { - d.window = make([]byte, 2*windowSize) - d.byteAvailable = false - d.err = nil - if d.state == nil { - return - } - s := d.state - s.index = 0 - s.hashOffset = 1 - s.length = minMatchLength - 1 - s.offset = 0 - s.chainHead = -1 -} - -// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, -// meaning it always has lazy matching on. -func (d *compressor) deflateLazy() { - s := d.state - // Sanity enables additional runtime tests. - // It's intended to be used during development - // to supplement the currently ad-hoc unit tests. - const sanity = debugDeflate - - if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { - return - } - if d.windowEnd != s.index && d.chain > 100 { - // Get literal huffman coder. - if d.h == nil { - d.h = newHuffmanEncoder(maxFlateBlockTokens) - } - var tmp [256]uint16 - for _, v := range d.window[s.index:d.windowEnd] { - tmp[v]++ - } - d.h.generate(tmp[:], 15) - } - - s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - - for { - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - lookahead := d.windowEnd - s.index - if lookahead < minMatchLength+maxMatchLength { - if !d.sync { - return - } - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - if lookahead == 0 { - // Flush current output block if any. 
- if d.byteAvailable { - // There is still one pending token that needs to be flushed - d.tokens.AddLiteral(d.window[s.index-1]) - d.byteAvailable = false - } - if d.tokens.n > 0 { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - return - } - } - if s.index < s.maxInsertIndex { - // Update the hash - hash := hash4(d.window[s.index:]) - ch := s.hashHead[hash] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[hash] = uint32(s.index + s.hashOffset) - } - prevLength := s.length - prevOffset := s.offset - s.length = minMatchLength - 1 - s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } - - if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { - if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { - s.length = newLength - s.offset = newOffset - } - } - - if prevLength >= minMatchLength && s.length <= prevLength { - // No better match, but check for better match at end... - // - // Skip forward a number of bytes. - // Offset of 2 seems to yield best results. 3 is sometimes better. - const checkOff = 2 - - // Check all, except full length - if prevLength < maxMatchLength-checkOff { - prevIndex := s.index - 1 - if prevIndex+prevLength < s.maxInsertIndex { - end := lookahead - if lookahead > maxMatchLength+checkOff { - end = maxMatchLength + checkOff - } - end += prevIndex - - // Hash at match end. - h := hash4(d.window[prevIndex+prevLength:]) - ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength - if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { - length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) - // It seems like a pure length metric is best. - if length > prevLength { - prevLength = length - prevOffset = prevIndex - ch2 - - // Extend back... - for i := checkOff - 1; i >= 0; i-- { - if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { - // Emit tokens we "owe" - for j := 0; j <= i; j++ { - d.tokens.AddLiteral(d.window[prevIndex+j]) - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - } - break - } else { - prevLength++ - } - } - } else if false { - // Check one further ahead. - // Only rarely better, disabled for now. - prevIndex++ - h := hash4(d.window[prevIndex+prevLength:]) - ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength - if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { - length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) - // It seems like a pure length metric is best. - if length > prevLength+checkOff { - prevLength = length - prevOffset = prevIndex - ch2 - prevIndex-- - - // Extend back... 
- for i := checkOff; i >= 0; i-- { - if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { - // Emit tokens we "owe" - for j := 0; j <= i; j++ { - d.tokens.AddLiteral(d.window[prevIndex+j]) - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - } - break - } else { - prevLength++ - } - } - } - } - } - } - } - } - // There was a match at the previous step, and the current match is - // not better. Output the previous match. - d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) - - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. If there is not enough - // lookahead, the last two strings are not inserted into the hash - // table. - newIndex := s.index + prevLength - 1 - // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } - end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - if dstSize > 0 { - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - } - - s.index = newIndex - d.byteAvailable = false - s.length = minMatchLength - 1 - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.ii = 0 - } else { - // Reset, if we got a match this run. - if s.length >= minMatchLength { - s.ii = 0 - } - // We have a byte waiting. Emit it. - if d.byteAvailable { - s.ii++ - d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - - // If we have a long run of no matches, skip additional bytes - // Resets when s.ii overflows after 64KB. - if n := int(s.ii) - d.chain; n > 0 { - n = 1 + int(n>>6) - for j := 0; j < n; j++ { - if s.index >= d.windowEnd-1 { - break - } - d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - // Index... 
- if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - s.index++ - } - // Flush last byte - d.tokens.AddLiteral(d.window[s.index-1]) - d.byteAvailable = false - // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - } - } else { - s.index++ - d.byteAvailable = true - } - } - } -} - -func (d *compressor) store() { - if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - d.windowEnd = 0 - } -} - -// fillWindow will fill the buffer with data for huffman-only compression. -// The number of bytes copied is returned. -func (d *compressor) fillBlock(b []byte) int { - n := copy(d.window[d.windowEnd:], b) - d.windowEnd += n - return n -} - -// storeHuff will compress and store the currently added data, -// if enough has been accumulated or we at the end of the stream. -// Any error that occurred will be in d.err -func (d *compressor) storeHuff() { - if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { - return - } - d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - d.windowEnd = 0 -} - -// storeFast will compress and store the currently added data, -// if enough has been accumulated or we at the end of the stream. -// Any error that occurred will be in d.err -func (d *compressor) storeFast() { - // We only compress if we have maxStoreBlockSize. - if d.windowEnd < len(d.window) { - if !d.sync { - return - } - // Handle extremely small sizes. - if d.windowEnd < 128 { - if d.windowEnd == 0 { - return - } - if d.windowEnd <= 32 { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - } else { - d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) - d.err = d.w.err - } - d.tokens.Reset() - d.windowEnd = 0 - d.fast.Reset() - return - } - } - - d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) - // If we made zero matches, store the block as is. - if d.tokens.n == 0 { - d.err = d.writeStoredBlock(d.window[:d.windowEnd]) - // If we removed less than 1/16th, huffman compress the block. - } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { - d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - } else { - d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) - d.err = d.w.err - } - d.tokens.Reset() - d.windowEnd = 0 -} - -// write will add input byte to the stream. -// Unless an error occurs all bytes will be consumed. 
-func (d *compressor) write(b []byte) (n int, err error) { - if d.err != nil { - return 0, d.err - } - n = len(b) - for len(b) > 0 { - if d.windowEnd == len(d.window) || d.sync { - d.step(d) - } - b = b[d.fill(d, b):] - if d.err != nil { - return 0, d.err - } - } - return n, d.err -} - -func (d *compressor) syncFlush() error { - d.sync = true - if d.err != nil { - return d.err - } - d.step(d) - if d.err == nil { - d.w.writeStoredHeader(0, false) - d.w.flush() - d.err = d.w.err - } - d.sync = false - return d.err -} - -func (d *compressor) init(w io.Writer, level int) (err error) { - d.w = newHuffmanBitWriter(w) - - switch { - case level == NoCompression: - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).store - case level == ConstantCompression: - d.w.logNewTablePenalty = 10 - d.window = make([]byte, 32<<10) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeHuff - case level == DefaultCompression: - level = 5 - fallthrough - case level >= 1 && level <= 6: - d.w.logNewTablePenalty = 7 - d.fast = newFastEnc(level) - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeFast - case 7 <= level && level <= 9: - d.w.logNewTablePenalty = 8 - d.state = &advancedState{} - d.compressionLevel = levels[level] - d.initDeflate() - d.fill = (*compressor).fillDeflate - d.step = (*compressor).deflateLazy - default: - return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) - } - d.level = level - return nil -} - -// reset the state of the compressor. -func (d *compressor) reset(w io.Writer) { - d.w.reset(w) - d.sync = false - d.err = nil - // We only need to reset a few things for Snappy. - if d.fast != nil { - d.fast.Reset() - d.windowEnd = 0 - d.tokens.Reset() - return - } - switch d.compressionLevel.chain { - case 0: - // level was NoCompression or ConstantCompresssion. - d.windowEnd = 0 - default: - s := d.state - s.chainHead = -1 - for i := range s.hashHead { - s.hashHead[i] = 0 - } - for i := range s.hashPrev { - s.hashPrev[i] = 0 - } - s.hashOffset = 1 - s.index, d.windowEnd = 0, 0 - d.blockStart, d.byteAvailable = 0, false - d.tokens.Reset() - s.length = minMatchLength - 1 - s.offset = 0 - s.ii = 0 - s.maxInsertIndex = 0 - } -} - -func (d *compressor) close() error { - if d.err != nil { - return d.err - } - d.sync = true - d.step(d) - if d.err != nil { - return d.err - } - if d.w.writeStoredHeader(0, true); d.w.err != nil { - return d.w.err - } - d.w.flush() - d.w.reset(nil) - return d.w.err -} - -// NewWriter returns a new Writer compressing data at the given level. -// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); -// higher levels typically run slower but compress more. -// Level 0 (NoCompression) does not attempt any compression; it only adds the -// necessary DEFLATE framing. -// Level -1 (DefaultCompression) uses the default compression level. -// Level -2 (ConstantCompression) will use Huffman compression only, giving -// a very fast compression for all types of input, but sacrificing considerable -// compression efficiency. -// -// If level is in the range [-2, 9] then the error returned will be nil. -// Otherwise the error returned will be non-nil. -func NewWriter(w io.Writer, level int) (*Writer, error) { - var dw Writer - if err := dw.d.init(w, level); err != nil { - return nil, err - } - return &dw, nil -} - -// NewWriterDict is like NewWriter but initializes the new -// Writer with a preset dictionary. 
The returned Writer behaves -// as if the dictionary had been written to it without producing -// any compressed output. The compressed data written to w -// can only be decompressed by a Reader initialized with the -// same dictionary. -func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { - zw, err := NewWriter(w, level) - if err != nil { - return nil, err - } - zw.d.fillWindow(dict) - zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. - return zw, err -} - -// A Writer takes data written to it and writes the compressed -// form of that data to an underlying writer (see NewWriter). -type Writer struct { - d compressor - dict []byte -} - -// Write writes data to w, which will eventually write the -// compressed form of data to its underlying writer. -func (w *Writer) Write(data []byte) (n int, err error) { - return w.d.write(data) -} - -// Flush flushes any pending data to the underlying writer. -// It is useful mainly in compressed network protocols, to ensure that -// a remote reader has enough data to reconstruct a packet. -// Flush does not return until the data has been written. -// Calling Flush when there is no pending data still causes the Writer -// to emit a sync marker of at least 4 bytes. -// If the underlying writer returns an error, Flush returns that error. -// -// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. -func (w *Writer) Flush() error { - // For more about flushing: - // http://www.bolet.org/~pornin/deflate-flush.html - return w.d.syncFlush() -} - -// Close flushes and closes the writer. -func (w *Writer) Close() error { - return w.d.close() -} - -// Reset discards the writer's state and makes it equivalent to -// the result of NewWriter or NewWriterDict called with dst -// and w's level and dictionary. -func (w *Writer) Reset(dst io.Writer) { - if len(w.dict) > 0 { - // w was created with NewWriterDict - w.d.reset(dst) - if dst != nil { - w.d.fillWindow(w.dict) - } - } else { - // w was created with NewWriter - w.d.reset(dst) - } -} - -// ResetDict discards the writer's state and makes it equivalent to -// the result of NewWriter or NewWriterDict called with dst -// and w's level, but sets a specific dictionary. -func (w *Writer) ResetDict(dst io.Writer, dict []byte) { - w.dict = dict - w.d.reset(dst) - w.d.fillWindow(w.dict) -} +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + NoCompression = 0 + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = -1 + + // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman + // entropy encoding. This mode is useful in compressing data that has + // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) + // that lacks an entropy encoder. Compression gains are achieved when + // certain bytes in the input stream occur more frequently than others. + // + // Note that HuffmanOnly produces a compressed output that is + // RFC 1951 compliant. That is, any valid DEFLATE decompressor will + // continue to be able to decompress this output. + HuffmanOnly = -2 + ConstantCompression = HuffmanOnly // compatibility alias. 
+ + logWindowSize = 15 + windowSize = 1 << logWindowSize + windowMask = windowSize - 1 + logMaxOffsetSize = 15 // Standard DEFLATE + minMatchLength = 4 // The smallest match that the compressor looks for + maxMatchLength = 258 // The longest match for the compressor + minOffsetSize = 1 // The shortest offset that makes any sense + + // The maximum number of tokens we will encode at the time. + // Smaller sizes usually creates less optimal blocks. + // Bigger can make context switching slow. + // We use this for levels 7-9, so we make it big. + maxFlateBlockTokens = 1 << 15 + maxStoreBlockSize = 65535 + hashBits = 17 // After 17 performance degrades + hashSize = 1 << hashBits + hashMask = (1 << hashBits) - 1 + hashShift = (hashBits + minMatchLength - 1) / minMatchLength + maxHashOffset = 1 << 28 + + skipNever = math.MaxInt32 + + debugDeflate = false +) + +type compressionLevel struct { + good, lazy, nice, chain, fastSkipHashing, level int +} + +// Compression levels have been rebalanced from zlib deflate defaults +// to give a bigger spread in speed and compression. +// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ +var levels = []compressionLevel{ + {}, // 0 + // Level 1-6 uses specialized algorithm - values not used + {0, 0, 0, 0, 0, 1}, + {0, 0, 0, 0, 0, 2}, + {0, 0, 0, 0, 0, 3}, + {0, 0, 0, 0, 0, 4}, + {0, 0, 0, 0, 0, 5}, + {0, 0, 0, 0, 0, 6}, + // Levels 7-9 use increasingly more lazy matching + // and increasingly stringent conditions for "good enough". + {8, 12, 16, 24, skipNever, 7}, + {16, 30, 40, 64, skipNever, 8}, + {32, 258, 258, 1024, skipNever, 9}, +} + +// advancedState contains state for the advanced levels, with bigger hash tables, etc. +type advancedState struct { + // deflate state + length int + offset int + maxInsertIndex int + chainHead int + hashOffset int + + ii uint16 // position of last match, intended to overflow to reset. + + // input window: unprocessed data is window[index:windowEnd] + index int + estBitsPerByte int + hashMatch [maxMatchLength + minMatchLength]uint32 + + // Input hash chains + // hashHead[hashValue] contains the largest inputIndex with the specified hash value + // If hashHead[hashValue] is within the current window, then + // hashPrev[hashHead[hashValue] & windowMask] contains the previous index + // with the same hash value. + hashHead [hashSize]uint32 + hashPrev [windowSize]uint32 +} + +type compressor struct { + compressionLevel + + h *huffmanEncoder + w *huffmanBitWriter + + // compression algorithm + fill func(*compressor, []byte) int // copy data to window + step func(*compressor) // process window + + window []byte + windowEnd int + blockStart int // window index where current tokens start + err error + + // queued output tokens + tokens tokens + fast fastEnc + state *advancedState + + sync bool // requesting flush + byteAvailable bool // if true, still need to process window[index-1]. 
+} + +func (d *compressor) fillDeflate(b []byte) int { + s := d.state + if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { + // shift the window by windowSize + //copy(d.window[:], d.window[windowSize:2*windowSize]) + *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) + s.index -= windowSize + d.windowEnd -= windowSize + if d.blockStart >= windowSize { + d.blockStart -= windowSize + } else { + d.blockStart = math.MaxInt32 + } + s.hashOffset += windowSize + if s.hashOffset > maxHashOffset { + delta := s.hashOffset - 1 + s.hashOffset -= delta + s.chainHead -= delta + // Iterate over slices instead of arrays to avoid copying + // the entire table onto the stack (Issue #18625). + for i, v := range s.hashPrev[:] { + if int(v) > delta { + s.hashPrev[i] = uint32(int(v) - delta) + } else { + s.hashPrev[i] = 0 + } + } + for i, v := range s.hashHead[:] { + if int(v) > delta { + s.hashHead[i] = uint32(int(v) - delta) + } else { + s.hashHead[i] = 0 + } + } + } + } + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { + if index > 0 || eof { + var window []byte + if d.blockStart <= index { + window = d.window[d.blockStart:index] + } + d.blockStart = index + //d.w.writeBlock(tok, eof, window) + d.w.writeBlockDynamic(tok, eof, window, d.sync) + return d.w.err + } + return nil +} + +// writeBlockSkip writes the current block and uses the number of tokens +// to determine if the block should be stored on no matches, or +// only huffman encoded. +func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { + if index > 0 || eof { + if d.blockStart <= index { + window := d.window[d.blockStart:index] + // If we removed less than a 64th of all literals + // we huffman compress the block. + if int(tok.n) > len(window)-int(tok.n>>6) { + d.w.writeBlockHuff(eof, window, d.sync) + } else { + // Write a dynamic huffman block. + d.w.writeBlockDynamic(tok, eof, window, d.sync) + } + } else { + d.w.writeBlock(tok, eof, nil) + } + d.blockStart = index + return d.w.err + } + return nil +} + +// fillWindow will fill the current window with the supplied +// dictionary and calculate all hashes. +// This is much faster than doing a full encode. +// Should only be used after a start/reset. +func (d *compressor) fillWindow(b []byte) { + // Do not fill window if we are in store-only or huffman mode. + if d.level <= 0 { + return + } + if d.fast != nil { + // encode the last data, but discard the result + if len(b) > maxMatchOffset { + b = b[len(b)-maxMatchOffset:] + } + d.fast.Encode(&d.tokens, b) + d.tokens.Reset() + return + } + s := d.state + // If we are given too much, cut it. + if len(b) > windowSize { + b = b[len(b)-windowSize:] + } + // Add all to window. + n := copy(d.window[d.windowEnd:], b) + + // Calculate 256 hashes at the time (more L1 cache hits) + loops := (n + 256 - minMatchLength) / 256 + for j := 0; j < loops; j++ { + startindex := j * 256 + end := startindex + 256 + minMatchLength - 1 + if end > n { + end = n + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + + if dstSize <= 0 { + continue + } + + dst := s.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + s.hashPrev[di&windowMask] = s.hashHead[newH] + // Set the head of the hash chain to us. 
+ s.hashHead[newH] = uint32(di + s.hashOffset) + } + } + // Update window information. + d.windowEnd += n + s.index = n +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. + tries := d.chain + length = minMatchLength - 1 + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + if minIndex < 0 { + minIndex = 0 + } + offset = 0 + + if d.chain < 100 { + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return + } + + // Minimum gain to accept a match. + cGain := 4 + + // Some like it higher (CSV), some like it lower (JSON) + const baseCost = 3 + // Base is 4 bytes at with an additional cost. + // Matches must be better than this. + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:i+minMatchLook], wPos) + if n > length { + // Calculate gain. Estimate + newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) + + //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) + if newGain > cGain { + length = n + offset = pos - i + cGain = newGain + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + } + if i <= minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset + if i < minIndex { + break + } + } + return +} + +func (d *compressor) writeStoredBlock(buf []byte) error { + if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { + return d.w.err + } + d.w.writeBytes(buf) + return d.w.err +} + +// hash4 returns a hash representation of the first 4 bytes +// of the supplied slice. +// The caller must ensure that len(b) >= 4. +func hash4(b []byte) uint32 { + return hash4u(binary.LittleEndian.Uint32(b), hashBits) +} + +// hash4 returns the hash of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <32. 
+func hash4u(u uint32, h uint8) uint32 { + return (u * prime4bytes) >> (32 - h) +} + +// bulkHash4 will compute hashes using the same +// algorithm as hash4 +func bulkHash4(b []byte, dst []uint32) { + if len(b) < 4 { + return + } + hb := binary.LittleEndian.Uint32(b) + + dst[0] = hash4u(hb, hashBits) + end := len(b) - 4 + 1 + for i := 1; i < end; i++ { + hb = (hb >> 8) | uint32(b[i+3])<<24 + dst[i] = hash4u(hb, hashBits) + } +} + +func (d *compressor) initDeflate() { + d.window = make([]byte, 2*windowSize) + d.byteAvailable = false + d.err = nil + if d.state == nil { + return + } + s := d.state + s.index = 0 + s.hashOffset = 1 + s.length = minMatchLength - 1 + s.offset = 0 + s.chainHead = -1 +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazy() { + s := d.state + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = debugDeflate + + if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { + return + } + if d.windowEnd != s.index && d.chain > 100 { + // Get literal huffman coder. + if d.h == nil { + d.h = newHuffmanEncoder(maxFlateBlockTokens) + } + var tmp [256]uint16 + for _, v := range d.window[s.index:d.windowEnd] { + tmp[v]++ + } + d.h.generate(tmp[:], 15) + } + + s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + + for { + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - s.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && s.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.AddLiteral(d.window[s.index-1]) + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + return + } + } + if s.index < s.maxInsertIndex { + // Update the hash + hash := hash4(d.window[s.index:]) + ch := s.hashHead[hash] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[hash] = uint32(s.index + s.hashOffset) + } + prevLength := s.length + prevOffset := s.offset + s.length = minMatchLength - 1 + s.offset = 0 + minIndex := s.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { + s.length = newLength + s.offset = newOffset + } + } + + if prevLength >= minMatchLength && s.length <= prevLength { + // No better match, but check for better match at end... + // + // Skip forward a number of bytes. + // Offset of 2 seems to yield best results. 3 is sometimes better. + const checkOff = 2 + + // Check all, except full length + if prevLength < maxMatchLength-checkOff { + prevIndex := s.index - 1 + if prevIndex+prevLength < s.maxInsertIndex { + end := lookahead + if lookahead > maxMatchLength+checkOff { + end = maxMatchLength + checkOff + } + end += prevIndex + + // Hash at match end. 
+ h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength { + prevLength = length + prevOffset = prevIndex - ch2 + + // Extend back... + for i := checkOff - 1; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } else if false { + // Check one further ahead. + // Only rarely better, disabled for now. + prevIndex++ + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength+checkOff { + prevLength = length + prevOffset = prevIndex - ch2 + prevIndex-- + + // Extend back... + for i := checkOff; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } + } + } + } + } + } + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + newIndex := s.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > s.maxInsertIndex { + end = s.maxInsertIndex + } + end += minMatchLength - 1 + startindex := s.index + 1 + if startindex > s.maxInsertIndex { + startindex = s.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := s.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + s.hashPrev[di&windowMask] = s.hashHead[newH] + // Set the head of the hash chain to us. 
+ s.hashHead[newH] = uint32(di + s.hashOffset) + } + } + + s.index = newIndex + d.byteAvailable = false + s.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.ii = 0 + } else { + // Reset, if we got a match this run. + if s.length >= minMatchLength { + s.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + s.ii++ + d.tokens.AddLiteral(d.window[s.index-1]) + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when s.ii overflows after 64KB. + if n := int(s.ii) - d.chain; n > 0 { + n = 1 + int(n>>6) + for j := 0; j < n; j++ { + if s.index >= d.windowEnd-1 { + break + } + d.tokens.AddLiteral(d.window[s.index-1]) + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + // Index... + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + s.index++ + } + // Flush last byte + d.tokens.AddLiteral(d.window[s.index-1]) + d.byteAvailable = false + // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + } + } else { + s.index++ + d.byteAvailable = true + } + } + } +} + +func (d *compressor) store() { + if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + d.windowEnd = 0 + } +} + +// fillWindow will fill the buffer with data for huffman-only compression. +// The number of bytes copied is returned. +func (d *compressor) fillBlock(b []byte) int { + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +// storeHuff will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeHuff() { + if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 { + return + } + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + d.windowEnd = 0 +} + +// storeFast will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeFast() { + // We only compress if we have maxStoreBlockSize. + if d.windowEnd < len(d.window) { + if !d.sync { + return + } + // Handle extremely small sizes. + if d.windowEnd < 128 { + if d.windowEnd == 0 { + return + } + if d.windowEnd <= 32 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + } else { + d.w.writeBlockHuff(false, d.window[:d.windowEnd], true) + d.err = d.w.err + } + d.tokens.Reset() + d.windowEnd = 0 + d.fast.Reset() + return + } + } + + d.fast.Encode(&d.tokens, d.window[:d.windowEnd]) + // If we made zero matches, store the block as is. + if d.tokens.n == 0 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + // If we removed less than 1/16th, huffman compress the block. 
+ } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) { + d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } else { + d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync) + d.err = d.w.err + } + d.tokens.Reset() + d.windowEnd = 0 +} + +// write will add input byte to the stream. +// Unless an error occurs all bytes will be consumed. +func (d *compressor) write(b []byte) (n int, err error) { + if d.err != nil { + return 0, d.err + } + n = len(b) + for len(b) > 0 { + if d.windowEnd == len(d.window) || d.sync { + d.step(d) + } + b = b[d.fill(d, b):] + if d.err != nil { + return 0, d.err + } + } + return n, d.err +} + +func (d *compressor) syncFlush() error { + d.sync = true + if d.err != nil { + return d.err + } + d.step(d) + if d.err == nil { + d.w.writeStoredHeader(0, false) + d.w.flush() + d.err = d.w.err + } + d.sync = false + return d.err +} + +func (d *compressor) init(w io.Writer, level int) (err error) { + d.w = newHuffmanBitWriter(w) + + switch { + case level == NoCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).store + case level == ConstantCompression: + d.w.logNewTablePenalty = 10 + d.window = make([]byte, 32<<10) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeHuff + case level == DefaultCompression: + level = 5 + fallthrough + case level >= 1 && level <= 6: + d.w.logNewTablePenalty = 7 + d.fast = newFastEnc(level) + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillBlock + d.step = (*compressor).storeFast + case 7 <= level && level <= 9: + d.w.logNewTablePenalty = 8 + d.state = &advancedState{} + d.compressionLevel = levels[level] + d.initDeflate() + d.fill = (*compressor).fillDeflate + d.step = (*compressor).deflateLazy + default: + return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) + } + d.level = level + return nil +} + +// reset the state of the compressor. +func (d *compressor) reset(w io.Writer) { + d.w.reset(w) + d.sync = false + d.err = nil + // We only need to reset a few things for Snappy. + if d.fast != nil { + d.fast.Reset() + d.windowEnd = 0 + d.tokens.Reset() + return + } + switch d.compressionLevel.chain { + case 0: + // level was NoCompression or ConstantCompresssion. + d.windowEnd = 0 + default: + s := d.state + s.chainHead = -1 + for i := range s.hashHead { + s.hashHead[i] = 0 + } + for i := range s.hashPrev { + s.hashPrev[i] = 0 + } + s.hashOffset = 1 + s.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + d.tokens.Reset() + s.length = minMatchLength - 1 + s.offset = 0 + s.ii = 0 + s.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + d.w.reset(nil) + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. +// Level 0 (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. +// Level -1 (DefaultCompression) uses the default compression level. +// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. 
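For context: `syncFlush` above is what `Writer.Flush` drives. It compresses whatever is buffered, then appends an empty stored block, which byte-aligns the output and ends the stream-so-far with the `00 00 ff ff` sync marker (zlib's Z_SYNC_FLUSH). A small sketch, again assuming the vendored module path:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	zw, err := flate.NewWriter(&buf, flate.BestSpeed)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write([]byte("partial packet")); err != nil {
		log.Fatal(err)
	}

	// Nothing reaches buf until the window fills or we sync. Flush
	// compresses the pending window and emits an empty stored block,
	// so a remote reader can decode everything written so far.
	if err := zw.Flush(); err != nil {
		log.Fatal(err)
	}
	out := buf.Bytes()
	fmt.Printf("flushed %d bytes; tail: % x\n", len(out), out[len(out)-4:]) // tail: 00 00 ff ff
	_ = zw.Close()
}
```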
+// +// If level is in the range [-2, 9] then the error returned will be nil. +// Otherwise the error returned will be non-nil. +func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. +func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + zw, err := NewWriter(w, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. +// Flush does not return until the data has been written. +// Calling Flush when there is no pending data still causes the Writer +// to emit a sync marker of at least 4 bytes. +// If the underlying writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. +func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. +func (w *Writer) Reset(dst io.Writer) { + if len(w.dict) > 0 { + // w was created with NewWriterDict + w.d.reset(dst) + if dst != nil { + w.d.fillWindow(w.dict) + } + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. +func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go index bb36351a5a..e66bcfe485 100644 --- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -1,184 +1,184 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// dictDecoder implements the LZ77 sliding dictionary as used in decompression. -// LZ77 decompresses data through sequences of two forms of commands: -// -// - Literal insertions: Runs of one or more symbols are inserted into the data -// stream as is. 
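To see the preset-dictionary contract from `NewWriterDict` end to end, a round-trip sketch pairing it with `flate.NewReaderDict` from the same package; the JSON strings are invented for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	dict := []byte(`{"id":"","note":"","created_at":""}`)
	doc := []byte(`{"id":"42","note":"hello","created_at":"2024-01-01"}`)

	// NewWriterDict primes the window with dict, so matches against it
	// cost (length, distance) pairs instead of literals.
	var buf bytes.Buffer
	zw, err := flate.NewWriterDict(&buf, flate.BestCompression, dict)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write(doc); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	compressed := buf.Len()

	// The stream is only decodable with the same dictionary.
	zr := flate.NewReaderDict(&buf, dict)
	plain, err := io.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	_ = zr.Close()
	fmt.Printf("%d compressed bytes -> %q\n", compressed, plain)
}
```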
This is accomplished through the writeByte method for a -// single symbol, or combinations of writeSlice/writeMark for multiple symbols. -// Any valid stream must start with a literal insertion if no preset dictionary -// is used. -// -// - Backward copies: Runs of one or more symbols are copied from previously -// emitted data. Backward copies come as the tuple (dist, length) where dist -// determines how far back in the stream to copy from and length determines how -// many bytes to copy. Note that it is valid for the length to be greater than -// the distance. Since LZ77 uses forward copies, that situation is used to -// perform a form of run-length encoding on repeated runs of symbols. -// The writeCopy and tryWriteCopy are used to implement this command. -// -// For performance reasons, this implementation performs little to no sanity -// checks about the arguments. As such, the invariants documented for each -// method call must be respected. -type dictDecoder struct { - hist []byte // Sliding window history - - // Invariant: 0 <= rdPos <= wrPos <= len(hist) - wrPos int // Current output position in buffer - rdPos int // Have emitted hist[:rdPos] already - full bool // Has a full window length been written yet? -} - -// init initializes dictDecoder to have a sliding window dictionary of the given -// size. If a preset dict is provided, it will initialize the dictionary with -// the contents of dict. -func (dd *dictDecoder) init(size int, dict []byte) { - *dd = dictDecoder{hist: dd.hist} - - if cap(dd.hist) < size { - dd.hist = make([]byte, size) - } - dd.hist = dd.hist[:size] - - if len(dict) > len(dd.hist) { - dict = dict[len(dict)-len(dd.hist):] - } - dd.wrPos = copy(dd.hist, dict) - if dd.wrPos == len(dd.hist) { - dd.wrPos = 0 - dd.full = true - } - dd.rdPos = dd.wrPos -} - -// histSize reports the total amount of historical data in the dictionary. -func (dd *dictDecoder) histSize() int { - if dd.full { - return len(dd.hist) - } - return dd.wrPos -} - -// availRead reports the number of bytes that can be flushed by readFlush. -func (dd *dictDecoder) availRead() int { - return dd.wrPos - dd.rdPos -} - -// availWrite reports the available amount of output buffer space. -func (dd *dictDecoder) availWrite() int { - return len(dd.hist) - dd.wrPos -} - -// writeSlice returns a slice of the available buffer to write data to. -// -// This invariant will be kept: len(s) <= availWrite() -func (dd *dictDecoder) writeSlice() []byte { - return dd.hist[dd.wrPos:] -} - -// writeMark advances the writer pointer by cnt. -// -// This invariant must be kept: 0 <= cnt <= availWrite() -func (dd *dictDecoder) writeMark(cnt int) { - dd.wrPos += cnt -} - -// writeByte writes a single byte to the dictionary. -// -// This invariant must be kept: 0 < availWrite() -func (dd *dictDecoder) writeByte(c byte) { - dd.hist[dd.wrPos] = c - dd.wrPos++ -} - -// writeCopy copies a string at a given (dist, length) to the output. -// This returns the number of bytes copied and may be less than the requested -// length if the available space in the output buffer is too small. -// -// This invariant must be kept: 0 < dist <= histSize() -func (dd *dictDecoder) writeCopy(dist, length int) int { - dstBase := dd.wrPos - dstPos := dstBase - srcPos := dstPos - dist - endPos := dstPos + length - if endPos > len(dd.hist) { - endPos = len(dd.hist) - } - - // Copy non-overlapping section after destination position. 
- // - // This section is non-overlapping in that the copy length for this section - // is always less than or equal to the backwards distance. This can occur - // if a distance refers to data that wraps-around in the buffer. - // Thus, a backwards copy is performed here; that is, the exact bytes in - // the source prior to the copy is placed in the destination. - if srcPos < 0 { - srcPos += len(dd.hist) - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) - srcPos = 0 - } - - // Copy possibly overlapping section before destination position. - // - // This section can overlap if the copy length for this section is larger - // than the backwards distance. This is allowed by LZ77 so that repeated - // strings can be succinctly represented using (dist, length) pairs. - // Thus, a forwards copy is performed here; that is, the bytes copied is - // possibly dependent on the resulting bytes in the destination as the copy - // progresses along. This is functionally equivalent to the following: - // - // for i := 0; i < endPos-dstPos; i++ { - // dd.hist[dstPos+i] = dd.hist[srcPos+i] - // } - // dstPos = endPos - // - for dstPos < endPos { - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) - } - - dd.wrPos = dstPos - return dstPos - dstBase -} - -// tryWriteCopy tries to copy a string at a given (distance, length) to the -// output. This specialized version is optimized for short distances. -// -// This method is designed to be inlined for performance reasons. -// -// This invariant must be kept: 0 < dist <= histSize() -func (dd *dictDecoder) tryWriteCopy(dist, length int) int { - dstPos := dd.wrPos - endPos := dstPos + length - if dstPos < dist || endPos > len(dd.hist) { - return 0 - } - dstBase := dstPos - srcPos := dstPos - dist - - // Copy possibly overlapping section before destination position. -loop: - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) - if dstPos < endPos { - goto loop // Avoid for-loop so that this function can be inlined - } - - dd.wrPos = dstPos - return dstPos - dstBase -} - -// readFlush returns a slice of the historical buffer that is ready to be -// emitted to the user. The data returned by readFlush must be fully consumed -// before calling any other dictDecoder methods. -func (dd *dictDecoder) readFlush() []byte { - toRead := dd.hist[dd.rdPos:dd.wrPos] - dd.rdPos = dd.wrPos - if dd.wrPos == len(dd.hist) { - dd.wrPos, dd.rdPos = 0, 0 - dd.full = true - } - return toRead -} +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// dictDecoder implements the LZ77 sliding dictionary as used in decompression. +// LZ77 decompresses data through sequences of two forms of commands: +// +// - Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. +// +// - Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. 
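To make the length-greater-than-distance case concrete before the comment continues: the decoder copies forward in chunks, so earlier output becomes the source for later output, yielding run-length expansion. A standalone sketch of that chunked loop (`forwardCopy` is a hypothetical name, not a method in this file):

```go
package main

import "fmt"

// forwardCopy emulates an LZ77 backward copy of n bytes from dist back,
// in the style of dictDecoder.writeCopy: when n > dist, already-written
// output is reused as the source, so the run repeats.
func forwardCopy(hist []byte, dist, n int) []byte {
	srcPos := len(hist) - dist
	dstPos := len(hist)
	hist = append(hist, make([]byte, n)...)
	endPos := dstPos + n
	// Each copy call reads only bytes written before dstPos, so the
	// usable source (and the per-call chunk) grows as the copy proceeds.
	for dstPos < endPos {
		dstPos += copy(hist[dstPos:endPos], hist[srcPos:dstPos])
	}
	return hist
}

func main() {
	// Copy 8 bytes from a distance of 2 in a 2-byte history.
	fmt.Println(string(forwardCopy([]byte("ab"), 2, 8))) // prints "ababababab"
}
```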
Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. +// +// For performance reasons, this implementation performs little to no sanity +// checks about the arguments. As such, the invariants documented for each +// method call must be respected. +type dictDecoder struct { + hist []byte // Sliding window history + + // Invariant: 0 <= rdPos <= wrPos <= len(hist) + wrPos int // Current output position in buffer + rdPos int // Have emitted hist[:rdPos] already + full bool // Has a full window length been written yet? +} + +// init initializes dictDecoder to have a sliding window dictionary of the given +// size. If a preset dict is provided, it will initialize the dictionary with +// the contents of dict. +func (dd *dictDecoder) init(size int, dict []byte) { + *dd = dictDecoder{hist: dd.hist} + + if cap(dd.hist) < size { + dd.hist = make([]byte, size) + } + dd.hist = dd.hist[:size] + + if len(dict) > len(dd.hist) { + dict = dict[len(dict)-len(dd.hist):] + } + dd.wrPos = copy(dd.hist, dict) + if dd.wrPos == len(dd.hist) { + dd.wrPos = 0 + dd.full = true + } + dd.rdPos = dd.wrPos +} + +// histSize reports the total amount of historical data in the dictionary. +func (dd *dictDecoder) histSize() int { + if dd.full { + return len(dd.hist) + } + return dd.wrPos +} + +// availRead reports the number of bytes that can be flushed by readFlush. +func (dd *dictDecoder) availRead() int { + return dd.wrPos - dd.rdPos +} + +// availWrite reports the available amount of output buffer space. +func (dd *dictDecoder) availWrite() int { + return len(dd.hist) - dd.wrPos +} + +// writeSlice returns a slice of the available buffer to write data to. +// +// This invariant will be kept: len(s) <= availWrite() +func (dd *dictDecoder) writeSlice() []byte { + return dd.hist[dd.wrPos:] +} + +// writeMark advances the writer pointer by cnt. +// +// This invariant must be kept: 0 <= cnt <= availWrite() +func (dd *dictDecoder) writeMark(cnt int) { + dd.wrPos += cnt +} + +// writeByte writes a single byte to the dictionary. +// +// This invariant must be kept: 0 < availWrite() +func (dd *dictDecoder) writeByte(c byte) { + dd.hist[dd.wrPos] = c + dd.wrPos++ +} + +// writeCopy copies a string at a given (dist, length) to the output. +// This returns the number of bytes copied and may be less than the requested +// length if the available space in the output buffer is too small. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) writeCopy(dist, length int) int { + dstBase := dd.wrPos + dstPos := dstBase + srcPos := dstPos - dist + endPos := dstPos + length + if endPos > len(dd.hist) { + endPos = len(dd.hist) + } + + // Copy non-overlapping section after destination position. + // + // This section is non-overlapping in that the copy length for this section + // is always less than or equal to the backwards distance. This can occur + // if a distance refers to data that wraps-around in the buffer. + // Thus, a backwards copy is performed here; that is, the exact bytes in + // the source prior to the copy is placed in the destination. + if srcPos < 0 { + srcPos += len(dd.hist) + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) + srcPos = 0 + } + + // Copy possibly overlapping section before destination position. + // + // This section can overlap if the copy length for this section is larger + // than the backwards distance. 
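The cursor protocol behind these unexported methods can be mocked in a few lines. The `ring` type below is an illustrative stand-in, not the real `dictDecoder`: it keeps the `0 <= rd <= wr <= len(hist)` invariant, hands `hist[rd:wr]` to the consumer, and wraps both cursors once the window is full:

```go
package main

import "fmt"

// ring is a stripped-down stand-in for dictDecoder's cursor protocol.
type ring struct {
	hist   []byte
	rd, wr int // invariant: 0 <= rd <= wr <= len(hist)
}

func (r *ring) availWrite() int { return len(r.hist) - r.wr }

func (r *ring) writeByte(c byte) { r.hist[r.wr] = c; r.wr++ }

// readFlush returns the span ready for the consumer and wraps the
// cursors when the window is full, like the real method above.
func (r *ring) readFlush() []byte {
	out := r.hist[r.rd:r.wr]
	r.rd = r.wr
	if r.wr == len(r.hist) {
		r.rd, r.wr = 0, 0 // full window written: history now spans len(hist)
	}
	return out
}

func main() {
	r := &ring{hist: make([]byte, 4)}
	for _, c := range []byte("notely") {
		if r.availWrite() == 0 {
			fmt.Printf("flush %q\n", r.readFlush()) // flush "note"
		}
		r.writeByte(c)
	}
	fmt.Printf("flush %q\n", r.readFlush()) // flush "ly"
}
```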
This is allowed by LZ77 so that repeated + // strings can be succinctly represented using (dist, length) pairs. + // Thus, a forwards copy is performed here; that is, the bytes copied is + // possibly dependent on the resulting bytes in the destination as the copy + // progresses along. This is functionally equivalent to the following: + // + // for i := 0; i < endPos-dstPos; i++ { + // dd.hist[dstPos+i] = dd.hist[srcPos+i] + // } + // dstPos = endPos + // + for dstPos < endPos { + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// tryWriteCopy tries to copy a string at a given (distance, length) to the +// output. This specialized version is optimized for short distances. +// +// This method is designed to be inlined for performance reasons. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) tryWriteCopy(dist, length int) int { + dstPos := dd.wrPos + endPos := dstPos + length + if dstPos < dist || endPos > len(dd.hist) { + return 0 + } + dstBase := dstPos + srcPos := dstPos - dist + + // Copy possibly overlapping section before destination position. +loop: + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + if dstPos < endPos { + goto loop // Avoid for-loop so that this function can be inlined + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// readFlush returns a slice of the historical buffer that is ready to be +// emitted to the user. The data returned by readFlush must be fully consumed +// before calling any other dictDecoder methods. +func (dd *dictDecoder) readFlush() []byte { + toRead := dd.hist[dd.rdPos:dd.wrPos] + dd.rdPos = dd.wrPos + if dd.wrPos == len(dd.hist) { + dd.wrPos, dd.rdPos = 0, 0 + dd.full = true + } + return toRead +} diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 24caf5f70b..ac3265901a 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -1,216 +1,216 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Modified for deflate by Klaus Post (c) 2015. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "fmt" - "math/bits" -) - -type fastEnc interface { - Encode(dst *tokens, src []byte) - Reset() -} - -func newFastEnc(level int) fastEnc { - switch level { - case 1: - return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} - case 2: - return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} - case 3: - return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} - case 4: - return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} - case 5: - return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} - case 6: - return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} - default: - panic("invalid level specified") - } -} - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. 
- baseMatchOffset = 1 // The smallest match offset - baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 - maxMatchOffset = 1 << 15 // The largest match offset - - bTableBits = 17 // Bits used in the big tables - bTableSize = 1 << bTableBits // Size of the table - allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. - bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. -) - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -type tableEntry struct { - offset int32 -} - -// fastGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. -type fastGen struct { - hist []byte - cur int32 -} - -func (e *fastGen) addBlock(src []byte) int32 { - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.hist = make([]byte, 0, allocHistory) - } else { - if cap(e.hist) < maxMatchOffset*2 { - panic("unexpected buffer size") - } - // Move down - offset := int32(len(e.hist)) - maxMatchOffset - // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) - *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:maxMatchOffset] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -type tableEntryPrev struct { - Cur tableEntry - Prev tableEntry -} - -// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) -} - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} - -// matchlen will return the match length between offsets and t in src. -// The maximum length returned is maxMatchLength - 4. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastGen) matchlen(s, t int32, src []byte) int32 { - if debugDecode { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > maxMatchOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } - - // Extend the match to be as long as possible. 
- return int32(matchLen(src[s:s1], src[t:])) -} - -// matchlenLong will return the match length between offsets and t in src. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { - if debugDeflate { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > maxMatchOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastGen) Reset() { - if cap(e.hist) < allocHistory { - e.hist = make([]byte, 0, allocHistory) - } - // We offset current position so everything will be out of reach. - // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. - if e.cur <= bufferReset { - e.cur += maxMatchOffset + int32(len(e.hist)) - } - e.hist = e.hist[:0] -} - -// matchLen returns the maximum length. -// 'a' must be the shortest of the two. -func matchLen(a, b []byte) int { - var checked int - - for len(a) >= 8 { - if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { - return checked + (bits.TrailingZeros64(diff) >> 3) - } - checked += 8 - a = a[8:] - b = b[8:] - } - b = b[:len(a)] - for i := range a { - if a[i] != b[i] { - return i + checked - } - } - return len(a) + checked -} +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" + "math/bits" +) + +type fastEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newFastEnc(level int) fastEnc { + switch level { + case 1: + return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} + case 2: + return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} + case 3: + return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} + case 4: + return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} + case 5: + return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} + case 6: + return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset + + bTableBits = 17 // Bits used in the big tables + bTableSize = 1 << bTableBits // Size of the table + allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. + bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. 
+) + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +type tableEntry struct { + offset int32 +} + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastGen struct { + hist []byte + cur int32 +} + +func (e *fastGen) addBlock(src []byte) int32 { + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.hist = make([]byte, 0, allocHistory) + } else { + if cap(e.hist) < maxMatchOffset*2 { + panic("unexpected buffer size") + } + // Move down + offset := int32(len(e.hist)) - maxMatchOffset + // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) + *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:maxMatchOffset] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. +// Preferably h should be a constant and should always be <64. +func hash7(u uint64, h uint8) uint32 { + return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) +} + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} + +// matchlen will return the match length between offsets and t in src. +// The maximum length returned is maxMatchLength - 4. +// It is assumed that s > t, that t >=0 and s < len(src). +func (e *fastGen) matchlen(s, t int32, src []byte) int32 { + if debugDecode { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > maxMatchOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + s1 := int(s) + maxMatchLength - 4 + if s1 > len(src) { + s1 = len(src) + } + + // Extend the match to be as long as possible. + return int32(matchLen(src[s:s1], src[t:])) +} + +// matchlenLong will return the match length between offsets and t in src. +// It is assumed that s > t, that t >=0 and s < len(src). 
+func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { + if debugDeflate { + if t >= s { + panic(fmt.Sprint("t >=s:", t, s)) + } + if int(s) >= len(src) { + panic(fmt.Sprint("s >= len(src):", s, len(src))) + } + if t < 0 { + panic(fmt.Sprint("t < 0:", t)) + } + if s-t > maxMatchOffset { + panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) + } + } + // Extend the match to be as long as possible. + return int32(matchLen(src[s:], src[t:])) +} + +// Reset the encoding table. +func (e *fastGen) Reset() { + if cap(e.hist) < allocHistory { + e.hist = make([]byte, 0, allocHistory) + } + // We offset current position so everything will be out of reach. + // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. + if e.cur <= bufferReset { + e.cur += maxMatchOffset + int32(len(e.hist)) + } + e.hist = e.hist[:0] +} + +// matchLen returns the maximum length. +// 'a' must be the shortest of the two. +func matchLen(a, b []byte) int { + var checked int + + for len(a) >= 8 { + if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { + return checked + (bits.TrailingZeros64(diff) >> 3) + } + checked += 8 + a = a[8:] + b = b[8:] + } + b = b[:len(a)] + for i := range a { + if a[i] != b[i] { + return i + checked + } + } + return len(a) + checked +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index 89a5dd89f9..d961101111 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -1,1187 +1,1187 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - // The largest offset code. - offsetCodeCount = 30 - - // The special code used to mark the end of a block. - endBlockMarker = 256 - - // The first length code. - lengthCodesStart = 257 - - // The number of codegen codes. - codegenCodeCount = 19 - badCode = 255 - - // maxPredefinedTokens is the maximum number of tokens - // where we check if fixed size is smaller. - maxPredefinedTokens = 250 - - // bufferFlushSize indicates the buffer size - // after which bytes are flushed to the writer. - // Should preferably be a multiple of 6, since - // we accumulate 6 bytes between writes to the buffer. - bufferFlushSize = 246 - - // bufferSize is the actual output byte buffer size. - // It must have additional headroom for a flush - // which can contain up to 8 bytes. - bufferSize = bufferFlushSize + 8 -) - -// Minimum length code that emits bits. -const lengthExtraBitsMinCode = 8 - -// The number of extra bits needed by length code X - LENGTH_CODES_START. -var lengthExtraBits = [32]uint8{ - /* 257 */ 0, 0, 0, - /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, - /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, - /* 280 */ 4, 5, 5, 5, 5, 0, -} - -// The length indicated by length code X - LENGTH_CODES_START. -var lengthBase = [32]uint8{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, - 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, - 64, 80, 96, 112, 128, 160, 192, 224, 255, -} - -// Minimum offset code that emits bits. -const offsetExtraBitsMinCode = 4 - -// offset code word extra bits. 
-var offsetExtraBits = [32]int8{ - 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, - 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, - /* extended window */ - 14, 14, -} - -var offsetCombined = [32]uint32{} - -func init() { - var offsetBase = [32]uint32{ - /* normal deflate */ - 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, - 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, - 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, - 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, - 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, - 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, - - /* extended window */ - 0x008000, 0x00c000, - } - - for i := range offsetCombined[:] { - // Don't use extended window values... - if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { - continue - } - offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) - } -} - -// The odd order in which the codegen code sizes are written. -var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} - -type huffmanBitWriter struct { - // writer is the underlying writer. - // Do not use it directly; use the write method, which ensures - // that Write errors are sticky. - writer io.Writer - - // Data waiting to be written is bytes[0:nbytes] - // and then the low nbits of bits. - bits uint64 - nbits uint8 - nbytes uint8 - lastHuffMan bool - literalEncoding *huffmanEncoder - tmpLitEncoding *huffmanEncoder - offsetEncoding *huffmanEncoder - codegenEncoding *huffmanEncoder - err error - lastHeader int - // Set between 0 (reused block can be up to 2x the size) - logNewTablePenalty uint - bytes [256 + 8]byte - literalFreq [lengthCodesStart + 32]uint16 - offsetFreq [32]uint16 - codegenFreq [codegenCodeCount]uint16 - - // codegen must have an extra space for the final symbol. - codegen [literalCount + offsetCodeCount + 1]uint8 -} - -// Huffman reuse. -// -// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. -// -// This is controlled by several variables: -// -// If lastHeader is non-zero the Huffman table can be reused. -// This also indicates that a Huffman table has been generated that can output all -// possible symbols. -// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated -// an EOB with the previous table must be written. -// -// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. -// -// An incoming block estimates the output size of a new table using a 'fresh' by calculating the -// optimal size and adding a penalty in 'logNewTablePenalty'. -// A Huffman table is not optimal, which is why we add a penalty, and generating a new table -// is slower both for compression and decompression. 
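// Aside (not part of the vendored diff): a hedged sketch of the reuse
// heuristic described in the comment block above, with made-up bit counts.
// A fresh table is only chosen when its estimated cost (optimal size plus a
// penalty of size>>logNewTablePenalty) beats the cost of reusing the previous
// table. Names and numbers here are illustrative, not the vendored API.
package main

import "fmt"

func main() {
	const logNewTablePenalty = 4 // penalty ~= 1/16th of the estimate
	newTableBits := 10000        // optimal size of a fresh table + data
	reuseBits := 10400           // data cost under the previous table

	estNew := newTableBits + newTableBits>>logNewTablePenalty
	if estNew < reuseBits {
		fmt.Println("emit new table, est bits:", estNew)
	} else {
		// 10625 >= 10400, so the slightly suboptimal old table wins.
		fmt.Println("reuse previous table, bits:", reuseBits)
	}
}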
- -func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { - return &huffmanBitWriter{ - writer: w, - literalEncoding: newHuffmanEncoder(literalCount), - tmpLitEncoding: newHuffmanEncoder(literalCount), - codegenEncoding: newHuffmanEncoder(codegenCodeCount), - offsetEncoding: newHuffmanEncoder(offsetCodeCount), - } -} - -func (w *huffmanBitWriter) reset(writer io.Writer) { - w.writer = writer - w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil - w.lastHeader = 0 - w.lastHuffMan = false -} - -func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { - a := t.offHist[:offsetCodeCount] - b := w.offsetEncoding.codes - b = b[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - - a = t.extraHist[:literalCount-256] - b = w.literalEncoding.codes[256:literalCount] - b = b[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - - a = t.litHist[:256] - b = w.literalEncoding.codes[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - return true -} - -func (w *huffmanBitWriter) flush() { - if w.err != nil { - w.nbits = 0 - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - n := w.nbytes - for w.nbits != 0 { - w.bytes[n] = byte(w.bits) - w.bits >>= 8 - if w.nbits > 8 { // Avoid underflow - w.nbits -= 8 - } else { - w.nbits = 0 - } - n++ - } - w.bits = 0 - w.write(w.bytes[:n]) - w.nbytes = 0 -} - -func (w *huffmanBitWriter) write(b []byte) { - if w.err != nil { - return - } - _, w.err = w.writer.Write(b) -} - -func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { - w.bits |= uint64(b) << (w.nbits & 63) - w.nbits += nb - if w.nbits >= 48 { - w.writeOutBits() - } -} - -func (w *huffmanBitWriter) writeBytes(bytes []byte) { - if w.err != nil { - return - } - n := w.nbytes - if w.nbits&7 != 0 { - w.err = InternalError("writeBytes with unfinished bits") - return - } - for w.nbits != 0 { - w.bytes[n] = byte(w.bits) - w.bits >>= 8 - w.nbits -= 8 - n++ - } - if n != 0 { - w.write(w.bytes[:n]) - } - w.nbytes = 0 - w.write(bytes) -} - -// RFC 1951 3.2.7 specifies a special run-length encoding for specifying -// the literal and offset lengths arrays (which are concatenated into a single -// array). This method generates that run-length encoding. -// -// The result is written into the codegen array, and the frequencies -// of each code is written into the codegenFreq array. -// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional -// information. Code badCode is an end marker -// -// numLiterals The number of literals in literalEncoding -// numOffsets The number of offsets in offsetEncoding -// litenc, offenc The literal and offset encoder to use -func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { - for i := range w.codegenFreq { - w.codegenFreq[i] = 0 - } - // Note that we are using codegen both as a temporary variable for holding - // a copy of the frequencies, and as the place where we put the result. - // This is fine because the output is always shorter than the input used - // so far. - codegen := w.codegen[:] // cache - // Copy the concatenated code sizes to codegen. Put a marker at the end. 
- cgnl := codegen[:numLiterals] - for i := range cgnl { - cgnl[i] = litEnc.codes[i].len() - } - - cgnl = codegen[numLiterals : numLiterals+numOffsets] - for i := range cgnl { - cgnl[i] = offEnc.codes[i].len() - } - codegen[numLiterals+numOffsets] = badCode - - size := codegen[0] - count := 1 - outIndex := 0 - for inIndex := 1; size != badCode; inIndex++ { - // INVARIANT: We have seen "count" copies of size that have not yet - // had output generated for them. - nextSize := codegen[inIndex] - if nextSize == size { - count++ - continue - } - // We need to generate codegen indicating "count" of size. - if size != 0 { - codegen[outIndex] = size - outIndex++ - w.codegenFreq[size]++ - count-- - for count >= 3 { - n := 6 - if n > count { - n = count - } - codegen[outIndex] = 16 - outIndex++ - codegen[outIndex] = uint8(n - 3) - outIndex++ - w.codegenFreq[16]++ - count -= n - } - } else { - for count >= 11 { - n := 138 - if n > count { - n = count - } - codegen[outIndex] = 18 - outIndex++ - codegen[outIndex] = uint8(n - 11) - outIndex++ - w.codegenFreq[18]++ - count -= n - } - if count >= 3 { - // count >= 3 && count <= 10 - codegen[outIndex] = 17 - outIndex++ - codegen[outIndex] = uint8(count - 3) - outIndex++ - w.codegenFreq[17]++ - count = 0 - } - } - count-- - for ; count >= 0; count-- { - codegen[outIndex] = size - outIndex++ - w.codegenFreq[size]++ - } - // Set up invariant for next time through the loop. - size = nextSize - count = 1 - } - // Marker indicating the end of the codegen. - codegen[outIndex] = badCode -} - -func (w *huffmanBitWriter) codegens() int { - numCodegens := len(w.codegenFreq) - for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { - numCodegens-- - } - return numCodegens -} - -func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { - numCodegens = len(w.codegenFreq) - for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { - numCodegens-- - } - return 3 + 5 + 5 + 4 + (3 * numCodegens) + - w.codegenEncoding.bitLength(w.codegenFreq[:]) + - int(w.codegenFreq[16])*2 + - int(w.codegenFreq[17])*3 + - int(w.codegenFreq[18])*7, numCodegens -} - -// dynamicSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) { - size = litEnc.bitLength(w.literalFreq[:]) + - offEnc.bitLength(w.offsetFreq[:]) - return size -} - -// dynamicSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { - header, numCodegens := w.headerSize() - size = header + - litEnc.bitLength(w.literalFreq[:]) + - offEnc.bitLength(w.offsetFreq[:]) + - extraBits - return size, numCodegens -} - -// extraBitSize will return the number of bits that will be written -// as "extra" bits on matches. -func (w *huffmanBitWriter) extraBitSize() int { - total := 0 - for i, n := range w.literalFreq[257:literalCount] { - total += int(n) * int(lengthExtraBits[i&31]) - } - for i, n := range w.offsetFreq[:offsetCodeCount] { - total += int(n) * int(offsetExtraBits[i&31]) - } - return total -} - -// fixedSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) fixedSize(extraBits int) int { - return 3 + - fixedLiteralEncoding.bitLength(w.literalFreq[:]) + - fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + - extraBits -} - -// storedSize calculates the stored size, including header. 
-// The function returns the size in bits and whether the block -// fits inside a single block. -func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { - if in == nil { - return 0, false - } - if len(in) <= maxStoreBlockSize { - return (len(in) + 5) * 8, true - } - return 0, false -} - -func (w *huffmanBitWriter) writeCode(c hcode) { - // The function does not get inlined if we "& 63" the shift. - w.bits |= c.code64() << (w.nbits & 63) - w.nbits += c.len() - if w.nbits >= 48 { - w.writeOutBits() - } -} - -// writeOutBits will write bits to the buffer. -func (w *huffmanBitWriter) writeOutBits() { - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - n := w.nbytes - - // We over-write, but faster... - binary.LittleEndian.PutUint64(w.bytes[n:], bits) - n += 6 - - if n >= bufferFlushSize { - if w.err != nil { - n = 0 - return - } - w.write(w.bytes[:n]) - n = 0 - } - - w.nbytes = n -} - -// Write the header of a dynamic Huffman block to the output stream. -// -// numLiterals The number of literals specified in codegen -// numOffsets The number of offsets specified in codegen -// numCodegens The number of codegens used in codegen -func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { - if w.err != nil { - return - } - var firstBits int32 = 4 - if isEof { - firstBits = 5 - } - w.writeBits(firstBits, 3) - w.writeBits(int32(numLiterals-257), 5) - w.writeBits(int32(numOffsets-1), 5) - w.writeBits(int32(numCodegens-4), 4) - - for i := 0; i < numCodegens; i++ { - value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) - w.writeBits(int32(value), 3) - } - - i := 0 - for { - var codeWord = uint32(w.codegen[i]) - i++ - if codeWord == badCode { - break - } - w.writeCode(w.codegenEncoding.codes[codeWord]) - - switch codeWord { - case 16: - w.writeBits(int32(w.codegen[i]), 2) - i++ - case 17: - w.writeBits(int32(w.codegen[i]), 3) - i++ - case 18: - w.writeBits(int32(w.codegen[i]), 7) - i++ - } - } -} - -// writeStoredHeader will write a stored header. -// If the stored block is only used for EOF, -// it is replaced with a fixed huffman block. -func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { - if w.err != nil { - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. - if length == 0 && isEof { - w.writeFixedHeader(isEof) - // EOB: 7 bits, value: 0 - w.writeBits(0, 7) - w.flush() - return - } - - var flag int32 - if isEof { - flag = 1 - } - w.writeBits(flag, 3) - w.flush() - w.writeBits(int32(length), 16) - w.writeBits(int32(^uint16(length)), 16) -} - -func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { - if w.err != nil { - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - // Indicate that we are a fixed Huffman block - var value int32 = 2 - if isEof { - value = 3 - } - w.writeBits(value, 3) -} - -// writeBlock will write a block of tokens with the smallest encoding. -// The original input can be supplied, and if the huffman encoded data -// is larger than the original bytes, the data will be written as a -// stored block. -// If the input is nil, the tokens will always be Huffman encoded. 
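// Aside (not part of the vendored diff): the arithmetic behind storedSize
// above. A stored block costs its payload plus roughly 5 bytes of header:
// the 3-bit block header padded to a byte boundary, then 16-bit LEN and
// 16-bit ~LEN, matching the (len(in) + 5) * 8 formula. Values below are
// illustrative only.
package main

import "fmt"

func storedBits(n int) int { return (n + 5) * 8 }

func main() {
	for _, n := range []int{100, 1024, 65535} {
		fmt.Printf("payload %5d bytes -> stored block %6d bits\n", n, storedBits(n))
	}
}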
-func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { - if w.err != nil { - return - } - - tokens.AddEOB() - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - numLiterals, numOffsets := w.indexTokens(tokens, false) - w.generate() - var extraBits int - storedSize, storable := w.storedSize(input) - if storable { - extraBits = w.extraBitSize() - } - - // Figure out smallest code. - // Fixed Huffman baseline. - var literalEncoding = fixedLiteralEncoding - var offsetEncoding = fixedOffsetEncoding - var size = math.MaxInt32 - if tokens.n < maxPredefinedTokens { - size = w.fixedSize(extraBits) - } - - // Dynamic Huffman? - var numCodegens int - - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) - - if dynamicSize < size { - size = dynamicSize - literalEncoding = w.literalEncoding - offsetEncoding = w.offsetEncoding - } - - // Stored bytes? - if storable && storedSize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - // Huffman. - if literalEncoding == fixedLiteralEncoding { - w.writeFixedHeader(eof) - } else { - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - } - - // Write the tokens. - w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) -} - -// writeBlockDynamic encodes a block using a dynamic Huffman table. -// This should be used if the symbols used have a disproportionate -// histogram distribution. -// If input is supplied and the compression savings are below 1/16th of the -// input size the block is stored. -func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - - sync = sync || eof - if sync { - tokens.AddEOB() - } - - // We cannot reuse pure huffman table, and must mark as EOF. - if (w.lastHuffMan || eof) && w.lastHeader > 0 { - // We will not try to reuse. - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - w.lastHuffMan = false - } - - // fillReuse enables filling of empty values. - // This will make encodings always reusable without testing. - // However, this does not appear to benefit on most cases. - const fillReuse = false - - // Check if we can reuse... - if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - numLiterals, numOffsets := w.indexTokens(tokens, !sync) - extraBits := 0 - ssize, storable := w.storedSize(input) - - const usePrefs = true - if storable || w.lastHeader > 0 { - extraBits = w.extraBitSize() - } - - var size int - - // Check if we should reuse. - if w.lastHeader > 0 { - // Estimate size for using a new table. - // Use the previous header size as the best estimate. - newSize := w.lastHeader + tokens.EstimatedBits() - newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty - - // The estimated size is calculated as an optimal table. - // We add a penalty to make it more realistic and re-use a bit more. - reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits - - // Check if a new table is better. 
- if newSize < reuseSize { - // Write the EOB we owe. - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - size = newSize - w.lastHeader = 0 - } else { - size = reuseSize - } - - if tokens.n < maxPredefinedTokens { - if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return - } - } - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - } - - // We want a new block/table - if w.lastHeader == 0 { - if fillReuse && !sync { - w.fillTokens() - numLiterals, numOffsets = maxNumLit, maxNumDist - } else { - w.literalFreq[endBlockMarker] = 1 - } - - w.generate() - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - - var numCodegens int - if fillReuse && !sync { - // Reindex for accurate size... - w.indexTokens(tokens, true) - } - size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) - - // Store predefined, if we don't get a reasonable improvement. - if tokens.n < maxPredefinedTokens { - if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { - // Store bytes, if we don't get an improvement. - if storable && ssize <= preSize { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return - } - } - - if storable && ssize <= size { - // Store bytes, if we don't get an improvement. - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - // Write Huffman table. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - if !sync { - w.lastHeader, _ = w.headerSize() - } - w.lastHuffMan = false - } - - if sync { - w.lastHeader = 0 - } - // Write the tokens. - w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) -} - -func (w *huffmanBitWriter) fillTokens() { - for i, v := range w.literalFreq[:literalCount] { - if v == 0 { - w.literalFreq[i] = 1 - } - } - for i, v := range w.offsetFreq[:offsetCodeCount] { - if v == 0 { - w.offsetFreq[i] = 1 - } - } -} - -// indexTokens indexes a slice of tokens, and updates -// literalFreq and offsetFreq, and generates literalEncoding -// and offsetEncoding. -// The number of literal and offset tokens is returned. 
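// Aside (not part of the vendored diff): indexTokens (below) trims trailing
// zero frequencies so the dynamic header only transmits code lengths that
// are actually used, and guarantees at least one offset so the offset tree
// stays encodable. A minimal sketch of that trim with a made-up histogram:
package main

import "fmt"

func main() {
	offsetFreq := []uint16{3, 0, 7, 1, 0, 0, 0}

	n := len(offsetFreq)
	for n > 0 && offsetFreq[n-1] == 0 {
		n--
	}
	if n == 0 {
		// No matches at all: count one offset so the tree can be encoded.
		offsetFreq[0] = 1
		n = 1
	}
	fmt.Println("offset codes to encode:", n) // 4
}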
-func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { - //copy(w.literalFreq[:], t.litHist[:]) - *(*[256]uint16)(w.literalFreq[:]) = t.litHist - //copy(w.literalFreq[256:], t.extraHist[:]) - *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist - w.offsetFreq = t.offHist - - if t.n == 0 { - return - } - if filled { - return maxNumLit, maxNumDist - } - // get the number of literals - numLiterals = len(w.literalFreq) - for w.literalFreq[numLiterals-1] == 0 { - numLiterals-- - } - // get the number of offsets - numOffsets = len(w.offsetFreq) - for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { - numOffsets-- - } - if numOffsets == 0 { - // We haven't found a single match. If we want to go with the dynamic encoding, - // we should count at least one offset to be sure that the offset huffman tree could be encoded. - w.offsetFreq[0] = 1 - numOffsets = 1 - } - return -} - -func (w *huffmanBitWriter) generate() { - w.literalEncoding.generate(w.literalFreq[:literalCount], 15) - w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) -} - -// writeTokens writes a slice of tokens to the output. -// codes for literal and offset encoding must be supplied. -func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { - if w.err != nil { - return - } - if len(tokens) == 0 { - return - } - - // Only last token should be endBlockMarker. - var deferEOB bool - if tokens[len(tokens)-1] == endBlockMarker { - tokens = tokens[:len(tokens)-1] - deferEOB = true - } - - // Create slices up to the next power of two to avoid bounds checks. - lits := leCodes[:256] - offs := oeCodes[:32] - lengths := leCodes[lengthCodesStart:] - lengths = lengths[:32] - - // Go 1.16 LOVES having these on stack. - bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - - for _, t := range tokens { - if t < 256 { - //w.writeCode(lits[t.literal()]) - c := lits[t] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - continue - } - - // Write the length - length := t.length() - lengthCode := lengthCode(length) & 31 - if false { - w.writeCode(lengths[lengthCode]) - } else { - // inlined - c := lengths[lengthCode] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - - if lengthCode >= lengthExtraBitsMinCode { - extraLengthBits := lengthExtraBits[lengthCode] - //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode]) - bits |= uint64(extraLength) << (nbits & 63) - nbits += extraLengthBits - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - // Write the offset - offset := t.offset() - offsetCode := (offset >> 16) & 31 - if false { 
- w.writeCode(offs[offsetCode]) - } else { - // inlined - c := offs[offsetCode] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - - if offsetCode >= offsetExtraBitsMinCode { - offsetComb := offsetCombined[offsetCode] - //w.writeBits(extraOffset, extraOffsetBits) - bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) - nbits += uint8(offsetComb) - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - } - // Restore... - w.bits, w.nbits, w.nbytes = bits, nbits, nbytes - - if deferEOB { - w.writeCode(leCodes[endBlockMarker]) - } -} - -// huffOffset is a static offset encoder used for huffman only encoding. -// It can be reused since we will not be encoding offset values. -var huffOffset *huffmanEncoder - -func init() { - w := newHuffmanBitWriter(nil) - w.offsetFreq[0] = 1 - huffOffset = newHuffmanEncoder(offsetCodeCount) - huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15) -} - -// writeBlockHuff encodes a block of bytes as either -// Huffman encoded literals or uncompressed bytes if the -// results only gains very little from compression. -func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - - // Clear histogram - for i := range w.literalFreq[:] { - w.literalFreq[i] = 0 - } - if !w.lastHuffMan { - for i := range w.offsetFreq[:] { - w.offsetFreq[i] = 0 - } - } - - const numLiterals = endBlockMarker + 1 - const numOffsets = 1 - - // Add everything as literals - // We have to estimate the header size. - // Assume header is around 70 bytes: - // https://stackoverflow.com/a/25454430 - const guessHeaderSizeBits = 70 * 8 - histogram(input, w.literalFreq[:numLiterals]) - ssize, storable := w.storedSize(input) - if storable && len(input) > 1024 { - // Quick check for incompressible content. - abs := float64(0) - avg := float64(len(input)) / 256 - max := float64(len(input) * 2) - for _, v := range w.literalFreq[:256] { - diff := float64(v) - avg - abs += diff * diff - if abs > max { - break - } - } - if abs < max { - if debugDeflate { - fmt.Println("stored", abs, "<", max) - } - // No chance we can compress this... - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - } - w.literalFreq[endBlockMarker] = 1 - w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) - estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) - if estBits < math.MaxInt32 { - estBits += w.lastHeader - if w.lastHeader == 0 { - estBits += guessHeaderSizeBits - } - estBits += estBits >> w.logNewTablePenalty - } - - // Store bytes, if we don't get a reasonable improvement. 
- if storable && ssize <= estBits { - if debugDeflate { - fmt.Println("stored,", ssize, "<=", estBits) - } - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - if w.lastHeader > 0 { - reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) - - if estBits < reuseSize { - if debugDeflate { - fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") - } - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } else if debugDeflate { - fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) - } - } - - count := 0 - if w.lastHeader == 0 { - // Use the temp encoding, so swap. - w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - numCodegens := w.codegens() - - // Huffman. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - w.lastHuffMan = true - w.lastHeader, _ = w.headerSize() - if debugDeflate { - count += w.lastHeader - fmt.Println("header:", count/8) - } - } - - encoding := w.literalEncoding.codes[:256] - // Go 1.16 LOVES having these on stack. At least 1.5x the speed. - bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - - if debugDeflate { - count -= int(nbytes)*8 + int(nbits) - } - // Unroll, write 3 codes/loop. - // Fastest number of unrolls. - for len(input) > 3 { - // We must have at least 48 bits free. - if nbits >= 8 { - n := nbits >> 3 - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - bits >>= (n * 8) & 63 - nbits -= n * 8 - nbytes += n - } - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - if debugDeflate { - count += int(nbytes) * 8 - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - a, b := encoding[input[0]], encoding[input[1]] - bits |= a.code64() << (nbits & 63) - bits |= b.code64() << ((nbits + a.len()) & 63) - c := encoding[input[2]] - nbits += b.len() + a.len() - bits |= c.code64() << (nbits & 63) - nbits += c.len() - input = input[3:] - } - - // Remaining... - for _, t := range input { - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - if debugDeflate { - count += int(nbytes) * 8 - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= c.code64() << (nbits & 63) - - nbits += c.len() - if debugDeflate { - count += int(c.len()) - } - } - // Restore... - w.bits, w.nbits, w.nbytes = bits, nbits, nbytes - - if debugDeflate { - nb := count + int(nbytes)*8 + int(nbits) - fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") - } - // Flush if needed to have space. - if w.nbits >= 48 { - w.writeOutBits() - } - - if eof || sync { - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - w.lastHuffMan = false - } -} +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + // The largest offset code. 
+ offsetCodeCount = 30 + + // The special code used to mark the end of a block. + endBlockMarker = 256 + + // The first length code. + lengthCodesStart = 257 + + // The number of codegen codes. + codegenCodeCount = 19 + badCode = 255 + + // maxPredefinedTokens is the maximum number of tokens + // where we check if fixed size is smaller. + maxPredefinedTokens = 250 + + // bufferFlushSize indicates the buffer size + // after which bytes are flushed to the writer. + // Should preferably be a multiple of 6, since + // we accumulate 6 bytes between writes to the buffer. + bufferFlushSize = 246 + + // bufferSize is the actual output byte buffer size. + // It must have additional headroom for a flush + // which can contain up to 8 bytes. + bufferSize = bufferFlushSize + 8 +) + +// Minimum length code that emits bits. +const lengthExtraBitsMinCode = 8 + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = [32]uint8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = [32]uint8{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// Minimum offset code that emits bits. +const offsetExtraBitsMinCode = 4 + +// offset code word extra bits. +var offsetExtraBits = [32]int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, +} + +var offsetCombined = [32]uint32{} + +func init() { + var offsetBase = [32]uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, + } + + for i := range offsetCombined[:] { + // Don't use extended window values... + if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { + continue + } + offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) + } +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + // writer is the underlying writer. + // Do not use it directly; use the write method, which ensures + // that Write errors are sticky. + writer io.Writer + + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. + bits uint64 + nbits uint8 + nbytes uint8 + lastHuffMan bool + literalEncoding *huffmanEncoder + tmpLitEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error + lastHeader int + // Set between 0 (reused block can be up to 2x the size) + logNewTablePenalty uint + bytes [256 + 8]byte + literalFreq [lengthCodesStart + 32]uint16 + offsetFreq [32]uint16 + codegenFreq [codegenCodeCount]uint16 + + // codegen must have an extra space for the final symbol. + codegen [literalCount + offsetCodeCount + 1]uint8 +} + +// Huffman reuse. +// +// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. +// +// This is controlled by several variables: +// +// If lastHeader is non-zero the Huffman table can be reused. 
+// This also indicates that a Huffman table has been generated that can output all
+// possible symbols.
+// It also indicates that an EOB has not yet been emitted, so if a new table is generated
+// an EOB with the previous table must be written.
+//
+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
+//
+// An incoming block estimates the output size of a fresh table by calculating the
+// optimal size and adding a penalty in 'logNewTablePenalty'.
+// A Huffman table is not optimal, which is why we add a penalty, and generating a new table
+// is slower both for compression and decompression.
+
+func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
+	return &huffmanBitWriter{
+		writer:          w,
+		literalEncoding: newHuffmanEncoder(literalCount),
+		tmpLitEncoding:  newHuffmanEncoder(literalCount),
+		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
+		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
+	}
+}
+
+func (w *huffmanBitWriter) reset(writer io.Writer) {
+	w.writer = writer
+	w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
+	w.lastHeader = 0
+	w.lastHuffMan = false
+}
+
+func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
+	a := t.offHist[:offsetCodeCount]
+	b := w.offsetEncoding.codes
+	b = b[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].zero() {
+			return false
+		}
+	}
+
+	a = t.extraHist[:literalCount-256]
+	b = w.literalEncoding.codes[256:literalCount]
+	b = b[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].zero() {
+			return false
+		}
+	}
+
+	a = t.litHist[:256]
+	b = w.literalEncoding.codes[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].zero() {
+			return false
+		}
+	}
+	return true
+}
+
+func (w *huffmanBitWriter) flush() {
+	if w.err != nil {
+		w.nbits = 0
+		return
+	}
+	if w.lastHeader > 0 {
+		// We owe an EOB
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+	n := w.nbytes
+	for w.nbits != 0 {
+		w.bytes[n] = byte(w.bits)
+		w.bits >>= 8
+		if w.nbits > 8 { // Avoid underflow
+			w.nbits -= 8
+		} else {
+			w.nbits = 0
+		}
+		n++
+	}
+	w.bits = 0
+	w.write(w.bytes[:n])
+	w.nbytes = 0
+}
+
+func (w *huffmanBitWriter) write(b []byte) {
+	if w.err != nil {
+		return
+	}
+	_, w.err = w.writer.Write(b)
+}
+
+func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
+	w.bits |= uint64(b) << (w.nbits & 63)
+	w.nbits += nb
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
+}
+
+func (w *huffmanBitWriter) writeBytes(bytes []byte) {
+	if w.err != nil {
+		return
+	}
+	n := w.nbytes
+	if w.nbits&7 != 0 {
+		w.err = InternalError("writeBytes with unfinished bits")
+		return
+	}
+	for w.nbits != 0 {
+		w.bytes[n] = byte(w.bits)
+		w.bits >>= 8
+		w.nbits -= 8
+		n++
+	}
+	if n != 0 {
+		w.write(w.bytes[:n])
+	}
+	w.nbytes = 0
+	w.write(bytes)
+}
+
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+// the literal and offset lengths arrays (which are concatenated into a single
+// array). This method generates that run-length encoding.
+//
+// The result is written into the codegen array, and the frequencies
+// of each code are written into the codegenFreq array.
+// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+// information. Code badCode is an end marker.
+//
+//	numLiterals		The number of literals in literalEncoding
+//	numOffsets		The number of offsets in offsetEncoding
+//	litenc, offenc	The literal and offset encoder to use
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
+	for i := range w.codegenFreq {
+		w.codegenFreq[i] = 0
+	}
+	// Note that we are using codegen both as a temporary variable for holding
+	// a copy of the frequencies, and as the place where we put the result.
+	// This is fine because the output is always shorter than the input used
+	// so far.
+	codegen := w.codegen[:] // cache
+	// Copy the concatenated code sizes to codegen. Put a marker at the end.
+	cgnl := codegen[:numLiterals]
+	for i := range cgnl {
+		cgnl[i] = litEnc.codes[i].len()
+	}
+
+	cgnl = codegen[numLiterals : numLiterals+numOffsets]
+	for i := range cgnl {
+		cgnl[i] = offEnc.codes[i].len()
+	}
+	codegen[numLiterals+numOffsets] = badCode
+
+	size := codegen[0]
+	count := 1
+	outIndex := 0
+	for inIndex := 1; size != badCode; inIndex++ {
+		// INVARIANT: We have seen "count" copies of size that have not yet
+		// had output generated for them.
+		nextSize := codegen[inIndex]
+		if nextSize == size {
+			count++
+			continue
+		}
+		// We need to generate codegen indicating "count" of size.
+		if size != 0 {
+			codegen[outIndex] = size
+			outIndex++
+			w.codegenFreq[size]++
+			count--
+			for count >= 3 {
+				n := 6
+				if n > count {
+					n = count
+				}
+				codegen[outIndex] = 16
+				outIndex++
+				codegen[outIndex] = uint8(n - 3)
+				outIndex++
+				w.codegenFreq[16]++
+				count -= n
+			}
+		} else {
+			for count >= 11 {
+				n := 138
+				if n > count {
+					n = count
+				}
+				codegen[outIndex] = 18
+				outIndex++
+				codegen[outIndex] = uint8(n - 11)
+				outIndex++
+				w.codegenFreq[18]++
+				count -= n
+			}
+			if count >= 3 {
+				// count >= 3 && count <= 10
+				codegen[outIndex] = 17
+				outIndex++
+				codegen[outIndex] = uint8(count - 3)
+				outIndex++
+				w.codegenFreq[17]++
+				count = 0
+			}
+		}
+		count--
+		for ; count >= 0; count-- {
+			codegen[outIndex] = size
+			outIndex++
+			w.codegenFreq[size]++
+		}
+		// Set up invariant for next time through the loop.
+		size = nextSize
+		count = 1
+	}
+	// Marker indicating the end of the codegen.
+	codegen[outIndex] = badCode
+}
+
+func (w *huffmanBitWriter) codegens() int {
+	numCodegens := len(w.codegenFreq)
+	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+		numCodegens--
+	}
+	return numCodegens
+}
+
+func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
+	numCodegens = len(w.codegenFreq)
+	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+		numCodegens--
+	}
+	return 3 + 5 + 5 + 4 + (3 * numCodegens) +
+		w.codegenEncoding.bitLength(w.codegenFreq[:]) +
+		int(w.codegenFreq[16])*2 +
+		int(w.codegenFreq[17])*3 +
+		int(w.codegenFreq[18])*7, numCodegens
+}
+
+// dynamicReuseSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
+	size = litEnc.bitLength(w.literalFreq[:]) +
+		offEnc.bitLength(w.offsetFreq[:])
+	return size
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
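// Aside (not part of the vendored diff): a small sketch of the RFC 1951
// run-length codes emitted by generateCodegen above. Code 16 repeats the
// previous length 3-6 times, 17 emits 3-10 zeros, 18 emits 11-138 zeros.
// Here we expand such a stream back into code lengths to show the round
// trip; the input assumes a literal length precedes any code 16.
package main

import "fmt"

func expand(codegen []uint8) (out []uint8) {
	for i := 0; i < len(codegen); i++ {
		switch c := codegen[i]; {
		case c <= 15:
			out = append(out, c) // literal code length
		case c == 16:
			i++
			for n := int(codegen[i]) + 3; n > 0; n-- {
				out = append(out, out[len(out)-1]) // repeat previous
			}
		case c == 17:
			i++
			out = append(out, make([]uint8, int(codegen[i])+3)...) // short zero run
		case c == 18:
			i++
			out = append(out, make([]uint8, int(codegen[i])+11)...) // long zero run
		}
	}
	return out
}

func main() {
	// 8, then "repeat 8 three more times" (16,0), 13 zeros (18,2), then 5.
	fmt.Println(expand([]uint8{8, 16, 0, 18, 2, 5}))
}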
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + header, numCodegens := w.headerSize() + size = header + + litEnc.bitLength(w.literalFreq[:]) + + offEnc.bitLength(w.offsetFreq[:]) + + extraBits + return size, numCodegens +} + +// extraBitSize will return the number of bits that will be written +// as "extra" bits on matches. +func (w *huffmanBitWriter) extraBitSize() int { + total := 0 + for i, n := range w.literalFreq[257:literalCount] { + total += int(n) * int(lengthExtraBits[i&31]) + } + for i, n := range w.offsetFreq[:offsetCodeCount] { + total += int(n) * int(offsetExtraBits[i&31]) + } + return total +} + +// fixedSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) fixedSize(extraBits int) int { + return 3 + + fixedLiteralEncoding.bitLength(w.literalFreq[:]) + + fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + + extraBits +} + +// storedSize calculates the stored size, including header. +// The function returns the size in bits and whether the block +// fits inside a single block. +func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { + if in == nil { + return 0, false + } + if len(in) <= maxStoreBlockSize { + return (len(in) + 5) * 8, true + } + return 0, false +} + +func (w *huffmanBitWriter) writeCode(c hcode) { + // The function does not get inlined if we "& 63" the shift. + w.bits |= c.code64() << (w.nbits & 63) + w.nbits += c.len() + if w.nbits >= 48 { + w.writeOutBits() + } +} + +// writeOutBits will write bits to the buffer. +func (w *huffmanBitWriter) writeOutBits() { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + + // We over-write, but faster... + binary.LittleEndian.PutUint64(w.bytes[n:], bits) + n += 6 + + if n >= bufferFlushSize { + if w.err != nil { + n = 0 + return + } + w.write(w.bytes[:n]) + n = 0 + } + + w.nbytes = n +} + +// Write the header of a dynamic Huffman block to the output stream. +// +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen +func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { + if w.err != nil { + return + } + var firstBits int32 = 4 + if isEof { + firstBits = 5 + } + w.writeBits(firstBits, 3) + w.writeBits(int32(numLiterals-257), 5) + w.writeBits(int32(numOffsets-1), 5) + w.writeBits(int32(numCodegens-4), 4) + + for i := 0; i < numCodegens; i++ { + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) + w.writeBits(int32(value), 3) + } + + i := 0 + for { + var codeWord = uint32(w.codegen[i]) + i++ + if codeWord == badCode { + break + } + w.writeCode(w.codegenEncoding.codes[codeWord]) + + switch codeWord { + case 16: + w.writeBits(int32(w.codegen[i]), 2) + i++ + case 17: + w.writeBits(int32(w.codegen[i]), 3) + i++ + case 18: + w.writeBits(int32(w.codegen[i]), 7) + i++ + } + } +} + +// writeStoredHeader will write a stored header. +// If the stored block is only used for EOF, +// it is replaced with a fixed huffman block. +func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. 
+ if length == 0 && isEof { + w.writeFixedHeader(isEof) + // EOB: 7 bits, value: 0 + w.writeBits(0, 7) + w.flush() + return + } + + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +// writeBlock will write a block of tokens with the smallest encoding. +// The original input can be supplied, and if the huffman encoded data +// is larger than the original bytes, the data will be written as a +// stored block. +// If the input is nil, the tokens will always be Huffman encoded. +func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens.AddEOB() + if w.lastHeader > 0 { + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + numLiterals, numOffsets := w.indexTokens(tokens, false) + w.generate() + var extraBits int + storedSize, storable := w.storedSize(input) + if storable { + extraBits = w.extraBitSize() + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + var size = math.MaxInt32 + if tokens.n < maxPredefinedTokens { + size = w.fixedSize(extraBits) + } + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storable && storedSize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + + // Write the tokens. + w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) +} + +// writeBlockDynamic encodes a block using a dynamic Huffman table. +// This should be used if the symbols used have a disproportionate +// histogram distribution. +// If input is supplied and the compression savings are below 1/16th of the +// input size the block is stored. +func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + sync = sync || eof + if sync { + tokens.AddEOB() + } + + // We cannot reuse pure huffman table, and must mark as EOF. + if (w.lastHuffMan || eof) && w.lastHeader > 0 { + // We will not try to reuse. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } + + // fillReuse enables filling of empty values. + // This will make encodings always reusable without testing. + // However, this does not appear to benefit on most cases. + const fillReuse = false + + // Check if we can reuse... 
+ if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } + + numLiterals, numOffsets := w.indexTokens(tokens, !sync) + extraBits := 0 + ssize, storable := w.storedSize(input) + + const usePrefs = true + if storable || w.lastHeader > 0 { + extraBits = w.extraBitSize() + } + + var size int + + // Check if we should reuse. + if w.lastHeader > 0 { + // Estimate size for using a new table. + // Use the previous header size as the best estimate. + newSize := w.lastHeader + tokens.EstimatedBits() + newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty + + // The estimated size is calculated as an optimal table. + // We add a penalty to make it more realistic and re-use a bit more. + reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits + + // Check if a new table is better. + if newSize < reuseSize { + // Write the EOB we owe. + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + size = newSize + w.lastHeader = 0 + } else { + size = reuseSize + } + + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + // Check if we get a reasonable size decrease. + if storable && ssize <= size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + + // We want a new block/table + if w.lastHeader == 0 { + if fillReuse && !sync { + w.fillTokens() + numLiterals, numOffsets = maxNumLit, maxNumDist + } else { + w.literalFreq[endBlockMarker] = 1 + } + + w.generate() + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + + var numCodegens int + if fillReuse && !sync { + // Reindex for accurate size... + w.indexTokens(tokens, true) + } + size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + // Store predefined, if we don't get a reasonable improvement. + if tokens.n < maxPredefinedTokens { + if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { + // Store bytes, if we don't get an improvement. + if storable && ssize <= preSize { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + w.writeFixedHeader(eof) + if !sync { + tokens.AddEOB() + } + w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) + return + } + } + + if storable && ssize <= size { + // Store bytes, if we don't get an improvement. + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + if !sync { + w.lastHeader, _ = w.headerSize() + } + w.lastHuffMan = false + } + + if sync { + w.lastHeader = 0 + } + // Write the tokens. 
+ w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) +} + +func (w *huffmanBitWriter) fillTokens() { + for i, v := range w.literalFreq[:literalCount] { + if v == 0 { + w.literalFreq[i] = 1 + } + } + for i, v := range w.offsetFreq[:offsetCodeCount] { + if v == 0 { + w.offsetFreq[i] = 1 + } + } +} + +// indexTokens indexes a slice of tokens, and updates +// literalFreq and offsetFreq, and generates literalEncoding +// and offsetEncoding. +// The number of literal and offset tokens is returned. +func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { + //copy(w.literalFreq[:], t.litHist[:]) + *(*[256]uint16)(w.literalFreq[:]) = t.litHist + //copy(w.literalFreq[256:], t.extraHist[:]) + *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist + w.offsetFreq = t.offHist + + if t.n == 0 { + return + } + if filled { + return maxNumLit, maxNumDist + } + // get the number of literals + numLiterals = len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets = len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. + w.offsetFreq[0] = 1 + numOffsets = 1 + } + return +} + +func (w *huffmanBitWriter) generate() { + w.literalEncoding.generate(w.literalFreq[:literalCount], 15) + w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeTokens writes a slice of tokens to the output. +// codes for literal and offset encoding must be supplied. +func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { + if w.err != nil { + return + } + if len(tokens) == 0 { + return + } + + // Only last token should be endBlockMarker. + var deferEOB bool + if tokens[len(tokens)-1] == endBlockMarker { + tokens = tokens[:len(tokens)-1] + deferEOB = true + } + + // Create slices up to the next power of two to avoid bounds checks. + lits := leCodes[:256] + offs := oeCodes[:32] + lengths := leCodes[lengthCodesStart:] + lengths = lengths[:32] + + // Go 1.16 LOVES having these on stack. 
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + for _, t := range tokens { + if t < 256 { + //w.writeCode(lits[t.literal()]) + c := lits[t] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + continue + } + + // Write the length + length := t.length() + lengthCode := lengthCode(length) & 31 + if false { + w.writeCode(lengths[lengthCode]) + } else { + // inlined + c := lengths[lengthCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if lengthCode >= lengthExtraBitsMinCode { + extraLengthBits := lengthExtraBits[lengthCode] + //w.writeBits(extraLength, extraLengthBits) + extraLength := int32(length - lengthBase[lengthCode]) + bits |= uint64(extraLength) << (nbits & 63) + nbits += extraLengthBits + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + // Write the offset + offset := t.offset() + offsetCode := (offset >> 16) & 31 + if false { + w.writeCode(offs[offsetCode]) + } else { + // inlined + c := offs[offsetCode] + bits |= c.code64() << (nbits & 63) + nbits += c.len() + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + + if offsetCode >= offsetExtraBitsMinCode { + offsetComb := offsetCombined[offsetCode] + //w.writeBits(extraOffset, extraOffsetBits) + bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) + nbits += uint8(offsetComb) + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if deferEOB { + w.writeCode(leCodes[endBlockMarker]) + } +} + +// huffOffset is a static offset encoder used for huffman only encoding. +// It can be reused since we will not be encoding offset values. +var huffOffset *huffmanEncoder + +func init() { + w := newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + huffOffset = newHuffmanEncoder(offsetCodeCount) + huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15) +} + +// writeBlockHuff encodes a block of bytes as either +// Huffman encoded literals or uncompressed bytes if the +// results only gains very little from compression. 
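// Aside (not part of the vendored diff): the inlined pattern used throughout
// writeTokens above, reduced to its core. Codes are ORed into a 64-bit
// accumulator and 6 whole bytes are spilled whenever 48 or more bits are
// pending, which is why bufferFlushSize prefers multiples of 6. The 5-bit
// codes below are fake stand-ins for real Huffman codes.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var (
		bits  uint64
		nbits uint8
		out   []byte
		buf   [8]byte
	)
	for i := 0; i < 20; i++ {
		code, length := uint64(i&31), uint8(5)
		bits |= code << (nbits & 63)
		nbits += length
		if nbits >= 48 {
			// Over-write 8 bytes but keep only 6, exactly as writeOutBits does.
			binary.LittleEndian.PutUint64(buf[:], bits)
			out = append(out, buf[:6]...)
			bits >>= 48
			nbits -= 48
		}
	}
	// Flush the tail one byte at a time, like flush().
	for nbits > 0 {
		out = append(out, byte(bits))
		bits >>= 8
		if nbits >= 8 {
			nbits -= 8
		} else {
			nbits = 0
		}
	}
	fmt.Printf("%d bytes: %x\n", len(out), out) // 100 bits -> 13 bytes
}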
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { + if w.err != nil { + return + } + + // Clear histogram + for i := range w.literalFreq[:] { + w.literalFreq[i] = 0 + } + if !w.lastHuffMan { + for i := range w.offsetFreq[:] { + w.offsetFreq[i] = 0 + } + } + + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + + // Add everything as literals + // We have to estimate the header size. + // Assume header is around 70 bytes: + // https://stackoverflow.com/a/25454430 + const guessHeaderSizeBits = 70 * 8 + histogram(input, w.literalFreq[:numLiterals]) + ssize, storable := w.storedSize(input) + if storable && len(input) > 1024 { + // Quick check for incompressible content. + abs := float64(0) + avg := float64(len(input)) / 256 + max := float64(len(input) * 2) + for _, v := range w.literalFreq[:256] { + diff := float64(v) - avg + abs += diff * diff + if abs > max { + break + } + } + if abs < max { + if debugDeflate { + fmt.Println("stored", abs, "<", max) + } + // No chance we can compress this... + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + } + w.literalFreq[endBlockMarker] = 1 + w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) + estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) + if estBits < math.MaxInt32 { + estBits += w.lastHeader + if w.lastHeader == 0 { + estBits += guessHeaderSizeBits + } + estBits += estBits >> w.logNewTablePenalty + } + + // Store bytes, if we don't get a reasonable improvement. + if storable && ssize <= estBits { + if debugDeflate { + fmt.Println("stored,", ssize, "<=", estBits) + } + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + if w.lastHeader > 0 { + reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) + + if estBits < reuseSize { + if debugDeflate { + fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") + } + // We owe an EOB + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + } else if debugDeflate { + fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) + } + } + + count := 0 + if w.lastHeader == 0 { + // Use the temp encoding, so swap. + w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + numCodegens := w.codegens() + + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + w.lastHuffMan = true + w.lastHeader, _ = w.headerSize() + if debugDeflate { + count += w.lastHeader + fmt.Println("header:", count/8) + } + } + + encoding := w.literalEncoding.codes[:256] + // Go 1.16 LOVES having these on stack. At least 1.5x the speed. + bits, nbits, nbytes := w.bits, w.nbits, w.nbytes + + if debugDeflate { + count -= int(nbytes)*8 + int(nbits) + } + // Unroll, write 3 codes/loop. + // Fastest number of unrolls. + for len(input) > 3 { + // We must have at least 48 bits free. 
+ if nbits >= 8 { + n := nbits >> 3 + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + bits >>= (n * 8) & 63 + nbits -= n * 8 + nbytes += n + } + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + a, b := encoding[input[0]], encoding[input[1]] + bits |= a.code64() << (nbits & 63) + bits |= b.code64() << ((nbits + a.len()) & 63) + c := encoding[input[2]] + nbits += b.len() + a.len() + bits |= c.code64() << (nbits & 63) + nbits += c.len() + input = input[3:] + } + + // Remaining... + for _, t := range input { + if nbits >= 48 { + binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + bits >>= 48 + nbits -= 48 + nbytes += 6 + if nbytes >= bufferFlushSize { + if w.err != nil { + nbytes = 0 + return + } + if debugDeflate { + count += int(nbytes) * 8 + } + _, w.err = w.writer.Write(w.bytes[:nbytes]) + nbytes = 0 + } + } + // Bitwriting inlined, ~30% speedup + c := encoding[t] + bits |= c.code64() << (nbits & 63) + + nbits += c.len() + if debugDeflate { + count += int(c.len()) + } + } + // Restore... + w.bits, w.nbits, w.nbytes = bits, nbits, nbytes + + if debugDeflate { + nb := count + int(nbytes)*8 + int(nbits) + fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") + } + // Flush if needed to have space. + if w.nbits >= 48 { + w.writeOutBits() + } + + if eof || sync { + w.writeCode(w.literalEncoding.codes[endBlockMarker]) + w.lastHeader = 0 + w.lastHuffMan = false + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index be7b58b473..4bd30ac2a9 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -1,417 +1,417 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "math" - "math/bits" -) - -const ( - maxBitsLimit = 16 - // number of valid literals - literalCount = 286 -) - -// hcode is a huffman code with a bit code and bit length. -type hcode uint32 - -func (h hcode) len() uint8 { - return uint8(h) -} - -func (h hcode) code64() uint64 { - return uint64(h >> 8) -} - -func (h hcode) zero() bool { - return h == 0 -} - -type huffmanEncoder struct { - codes []hcode - bitCount [17]int32 - - // Allocate a reusable buffer with the longest possible frequency table. - // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount. - // The largest of these is literalCount, so we allocate for that case. - freqcache [literalCount + 1]literalNode -} - -type literalNode struct { - literal uint16 - freq uint16 -} - -// A levelInfo describes the state of the constructed tree for a given depth. -type levelInfo struct { - // Our level. for better printing - level int32 - - // The frequency of the last node at this level - lastFreq int32 - - // The frequency of the next character to add to this level - nextCharFreq int32 - - // The frequency of the next pair (from level below) to add to this level. - // Only valid if the "needed" value of the next lower level is 0. - nextPairFreq int32 - - // The number of chains remaining to generate for this level before moving - // up to the next level - needed int32 -} - -// set sets the code and length of an hcode. 
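The `writeBlockHuff` screen above decides whether large input even looks compressible: it sums squared deviations of the byte histogram from a flat distribution and emits a stored block when the sum stays below `len(input)*2`. A standalone sketch of the same test; the `looksIncompressible` name is illustrative:

```go
package main

import (
	"bytes"
	"fmt"
	"math/rand"
)

// looksIncompressible reproduces the screen used by writeBlockHuff: for
// blocks over 1 KiB, sum squared deviations of the byte histogram from a
// flat distribution; near-random data keeps the sum under len(input)*2.
func looksIncompressible(input []byte) bool {
	if len(input) <= 1024 {
		return false // the screen only applies to larger blocks
	}
	var freq [256]float64
	for _, b := range input {
		freq[b]++
	}
	avg := float64(len(input)) / 256
	max := float64(len(input) * 2)
	abs := 0.0
	for _, v := range freq {
		d := v - avg
		abs += d * d
		if abs > max {
			break // enough skew: compression is worth trying
		}
	}
	return abs < max
}

func main() {
	random := make([]byte, 4096)
	rand.New(rand.NewSource(1)).Read(random)
	text := bytes.Repeat([]byte("notely "), 600)
	fmt.Println(looksIncompressible(random)) // true: store as-is
	fmt.Println(looksIncompressible(text))   // false: worth compressing
}
```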
-func (h *hcode) set(code uint16, length uint8) {
-	*h = hcode(length) | (hcode(code) << 8)
-}
-
-func newhcode(code uint16, length uint8) hcode {
-	return hcode(length) | (hcode(code) << 8)
-}
-
-func reverseBits(number uint16, bitLength byte) uint16 {
-	return bits.Reverse16(number << ((16 - bitLength) & 15))
-}
-
-func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
-
-func newHuffmanEncoder(size int) *huffmanEncoder {
-	// Make capacity to next power of two.
-	c := uint(bits.Len32(uint32(size - 1)))
-	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
-}
-
-// Return the number of literals assigned to each bit size in the Huffman encoding
-//
-// This method is only called when list.length >= 3
-// The cases of 0, 1, and 2 literals are handled by special case code.
-//
-// list An array of the literals with non-zero frequencies
-//
-// and their associated frequencies. The array is in order of increasing
-// frequency, and has as its last element a special element with frequency
-// MaxInt32
-//
-// maxBits The maximum number of bits that should be used to encode any literal.
-//
-// Must be less than 16.
-//
-// return An integer array in which array[i] indicates the number of literals
-//
-// that should be encoded in i bits.
-func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
-	if maxBits >= maxBitsLimit {
-		panic("flate: maxBits too large")
-	}
-	n := int32(len(list))
-	list = list[0 : n+1]
-	list[n] = maxNode()
-
-	// The tree can't have greater depth than n - 1, no matter what. This
-	// saves a little bit of work in some small cases
-	if maxBits > n-1 {
-		maxBits = n - 1
-	}
-
-	// Create information about each of the levels.
-	// A bogus "Level 0" whose sole purpose is so that
-	// level1.prev.needed==0.  This makes level1.nextPairFreq
-	// be a legitimate value that never gets chosen.
-	var levels [maxBitsLimit]levelInfo
-	// leafCounts[i] counts the number of literals at the left
-	// of ancestors of the rightmost node at level i.
-	// leafCounts[i][j] is the number of literals at the left
-	// of the level j ancestor.
-	var leafCounts [maxBitsLimit][maxBitsLimit]int32
-
-	// Descending to only have 1 bounds check.
-	l2f := int32(list[2].freq)
-	l1f := int32(list[1].freq)
-	l0f := int32(list[0].freq) + int32(list[1].freq)
-
-	for level := int32(1); level <= maxBits; level++ {
-		// For every level, the first two items are the first two characters.
-		// We initialize the levels as if we had already figured this out.
-		levels[level] = levelInfo{
-			level:        level,
-			lastFreq:     l1f,
-			nextCharFreq: l2f,
-			nextPairFreq: l0f,
-		}
-		leafCounts[level][level] = 2
-		if level == 1 {
-			levels[level].nextPairFreq = math.MaxInt32
-		}
-	}
-
-	// We need a total of 2*n - 2 items at top level and have already generated 2.
-	levels[maxBits].needed = 2*n - 4
-
-	level := uint32(maxBits)
-	for level < 16 {
-		l := &levels[level]
-		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
-			// We've run out of both leafs and pairs.
-			// End all calculations for this level.
-			// To make sure we never come back to this level or any lower level,
-			// set nextPairFreq impossibly large.
-			l.needed = 0
-			levels[level+1].nextPairFreq = math.MaxInt32
-			level++
-			continue
-		}
-
-		prevFreq := l.lastFreq
-		if l.nextCharFreq < l.nextPairFreq {
-			// The next item on this row is a leaf node.
-			n := leafCounts[level][level] + 1
-			l.lastFreq = l.nextCharFreq
-			// Lower leafCounts are the same of the previous node.
- leafCounts[level][level] = n - e := list[n] - if e.literal < math.MaxUint16 { - l.nextCharFreq = int32(e.freq) - } else { - l.nextCharFreq = math.MaxInt32 - } - } else { - // The next item on this row is a pair from the previous row. - // nextPairFreq isn't valid until we generate two - // more values in the level below - l.lastFreq = l.nextPairFreq - // Take leaf counts from the lower level, except counts[level] remains the same. - if true { - save := leafCounts[level][level] - leafCounts[level] = leafCounts[level-1] - leafCounts[level][level] = save - } else { - copy(leafCounts[level][:level], leafCounts[level-1][:level]) - } - levels[l.level-1].needed = 2 - } - - if l.needed--; l.needed == 0 { - // We've done everything we need to do for this level. - // Continue calculating one level up. Fill in nextPairFreq - // of that level with the sum of the two nodes we've just calculated on - // this level. - if l.level == maxBits { - // All done! - break - } - levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq - level++ - } else { - // If we stole from below, move down temporarily to replenish it. - for levels[level-1].needed > 0 { - level-- - } - } - } - - // Somethings is wrong if at the end, the top level is null or hasn't used - // all of the leaves. - if leafCounts[maxBits][maxBits] != n { - panic("leafCounts[maxBits][maxBits] != n") - } - - bitCount := h.bitCount[:maxBits+1] - bits := 1 - counts := &leafCounts[maxBits] - for level := maxBits; level > 0; level-- { - // chain.leafCount gives the number of literals requiring at least "bits" - // bits to encode. - bitCount[bits] = counts[level] - counts[level-1] - bits++ - } - return bitCount -} - -// Look at the leaves and assign them a bit count and an encoding as specified -// in RFC 1951 3.2.2 -func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { - code := uint16(0) - for n, bits := range bitCount { - code <<= 1 - if n == 0 || bits == 0 { - continue - } - // The literals list[len(list)-bits] .. list[len(list)-bits] - // are encoded using "bits" bits, and get the values - // code, code + 1, .... The code values are - // assigned in literal order (not frequency order). - chunk := list[len(list)-int(bits):] - - sortByLiteral(chunk) - for _, node := range chunk { - h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) - code++ - } - list = list[0 : len(list)-int(bits)] - } -} - -// Update this Huffman Code object to be the minimum code for the specified frequency count. -// -// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. -// maxBits The maximum number of bits to use for any literal. -func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { - list := h.freqcache[:len(freq)+1] - codes := h.codes[:len(freq)] - // Number of non-zero literals - count := 0 - // Set list to be the set of all non-zero literals and their frequencies - for i, f := range freq { - if f != 0 { - list[count] = literalNode{uint16(i), f} - count++ - } else { - codes[i] = 0 - } - } - list[count] = literalNode{} - - list = list[:count] - if count <= 2 { - // Handle the small cases here, because they are awkward for the general case code. With - // two or fewer literals, everything has bit length 1. - for i, node := range list { - // "list" is in order of increasing literal value. 
-			h.codes[node.literal].set(uint16(i), 1)
-		}
-		return
-	}
-	sortByFreq(list)
-
-	// Get the number of literals for each bit count
-	bitCount := h.bitCounts(list, maxBits)
-	// And do the assignment
-	h.assignEncodingAndSize(bitCount, list)
-}
-
-// atLeastOne clamps the result between 1 and 15.
-func atLeastOne(v float32) float32 {
-	if v < 1 {
-		return 1
-	}
-	if v > 15 {
-		return 15
-	}
-	return v
-}
-
-func histogram(b []byte, h []uint16) {
-	if true && len(b) >= 8<<10 {
-		// Split for bigger inputs
-		histogramSplit(b, h)
-	} else {
-		h = h[:256]
-		for _, t := range b {
-			h[t]++
-		}
-	}
-}
-
-func histogramSplit(b []byte, h []uint16) {
-	// Tested, and slightly faster than 2-way.
-	// Writing to separate arrays and combining is also slightly slower.
-	h = h[:256]
-	for len(b)&3 != 0 {
-		h[b[0]]++
-		b = b[1:]
-	}
-	n := len(b) / 4
-	x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
-	y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
-	for i, t := range x {
-		v0 := &h[t]
-		v1 := &h[y[i]]
-		v3 := &h[w[i]]
-		v2 := &h[z[i]]
-		*v0++
-		*v1++
-		*v2++
-		*v3++
-	}
-}
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"math"
+	"math/bits"
+)
+
+const (
+	maxBitsLimit = 16
+	// number of valid literals
+	literalCount = 286
+)
+
+// hcode is a huffman code with a bit code and bit length.
+type hcode uint32
+
+func (h hcode) len() uint8 {
+	return uint8(h)
+}
+
+func (h hcode) code64() uint64 {
+	return uint64(h >> 8)
+}
+
+func (h hcode) zero() bool {
+	return h == 0
+}
+
+type huffmanEncoder struct {
+	codes    []hcode
+	bitCount [17]int32
+
+	// Allocate a reusable buffer with the longest possible frequency table.
+	// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+	// The largest of these is literalCount, so we allocate for that case.
+	freqcache [literalCount + 1]literalNode
+}
+
+type literalNode struct {
+	literal uint16
+	freq    uint16
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+	// Our level. for better printing
+	level int32
+
+	// The frequency of the last node at this level
+	lastFreq int32
+
+	// The frequency of the next character to add to this level
+	nextCharFreq int32
+
+	// The frequency of the next pair (from level below) to add to this level.
+	// Only valid if the "needed" value of the next lower level is 0.
+	nextPairFreq int32
+
+	// The number of chains remaining to generate for this level before moving
+	// up to the next level
+	needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint8) {
+	*h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+	return hcode(length) | (hcode(code) << 8)
+}
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+	return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+	// Make capacity to next power of two.
+	c := uint(bits.Len32(uint32(size - 1)))
+	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
+}
+
+// Return the number of literals assigned to each bit size in the Huffman encoding
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list An array of the literals with non-zero frequencies
+//
+// and their associated frequencies.
The array is in order of increasing +// frequency, and has as its last element a special element with frequency +// MaxInt32 +// +// maxBits The maximum number of bits that should be used to encode any literal. +// +// Must be less than 16. +// +// return An integer array in which array[i] indicates the number of literals +// +// that should be encoded in i bits. +func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { + if maxBits >= maxBitsLimit { + panic("flate: maxBits too large") + } + n := int32(len(list)) + list = list[0 : n+1] + list[n] = maxNode() + + // The tree can't have greater depth than n - 1, no matter what. This + // saves a little bit of work in some small cases + if maxBits > n-1 { + maxBits = n - 1 + } + + // Create information about each of the levels. + // A bogus "Level 0" whose sole purpose is so that + // level1.prev.needed==0. This makes level1.nextPairFreq + // be a legitimate value that never gets chosen. + var levels [maxBitsLimit]levelInfo + // leafCounts[i] counts the number of literals at the left + // of ancestors of the rightmost node at level i. + // leafCounts[i][j] is the number of literals at the left + // of the level j ancestor. + var leafCounts [maxBitsLimit][maxBitsLimit]int32 + + // Descending to only have 1 bounds check. + l2f := int32(list[2].freq) + l1f := int32(list[1].freq) + l0f := int32(list[0].freq) + int32(list[1].freq) + + for level := int32(1); level <= maxBits; level++ { + // For every level, the first two items are the first two characters. + // We initialize the levels as if we had already figured this out. + levels[level] = levelInfo{ + level: level, + lastFreq: l1f, + nextCharFreq: l2f, + nextPairFreq: l0f, + } + leafCounts[level][level] = 2 + if level == 1 { + levels[level].nextPairFreq = math.MaxInt32 + } + } + + // We need a total of 2*n - 2 items at top level and have already generated 2. + levels[maxBits].needed = 2*n - 4 + + level := uint32(maxBits) + for level < 16 { + l := &levels[level] + if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { + // We've run out of both leafs and pairs. + // End all calculations for this level. + // To make sure we never come back to this level or any lower level, + // set nextPairFreq impossibly large. + l.needed = 0 + levels[level+1].nextPairFreq = math.MaxInt32 + level++ + continue + } + + prevFreq := l.lastFreq + if l.nextCharFreq < l.nextPairFreq { + // The next item on this row is a leaf node. + n := leafCounts[level][level] + 1 + l.lastFreq = l.nextCharFreq + // Lower leafCounts are the same of the previous node. + leafCounts[level][level] = n + e := list[n] + if e.literal < math.MaxUint16 { + l.nextCharFreq = int32(e.freq) + } else { + l.nextCharFreq = math.MaxInt32 + } + } else { + // The next item on this row is a pair from the previous row. + // nextPairFreq isn't valid until we generate two + // more values in the level below + l.lastFreq = l.nextPairFreq + // Take leaf counts from the lower level, except counts[level] remains the same. + if true { + save := leafCounts[level][level] + leafCounts[level] = leafCounts[level-1] + leafCounts[level][level] = save + } else { + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + } + levels[l.level-1].needed = 2 + } + + if l.needed--; l.needed == 0 { + // We've done everything we need to do for this level. + // Continue calculating one level up. Fill in nextPairFreq + // of that level with the sum of the two nodes we've just calculated on + // this level. 
+ if l.level == maxBits { + // All done! + break + } + levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq + level++ + } else { + // If we stole from below, move down temporarily to replenish it. + for levels[level-1].needed > 0 { + level-- + } + } + } + + // Somethings is wrong if at the end, the top level is null or hasn't used + // all of the leaves. + if leafCounts[maxBits][maxBits] != n { + panic("leafCounts[maxBits][maxBits] != n") + } + + bitCount := h.bitCount[:maxBits+1] + bits := 1 + counts := &leafCounts[maxBits] + for level := maxBits; level > 0; level-- { + // chain.leafCount gives the number of literals requiring at least "bits" + // bits to encode. + bitCount[bits] = counts[level] - counts[level-1] + bits++ + } + return bitCount +} + +// Look at the leaves and assign them a bit count and an encoding as specified +// in RFC 1951 3.2.2 +func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { + code := uint16(0) + for n, bits := range bitCount { + code <<= 1 + if n == 0 || bits == 0 { + continue + } + // The literals list[len(list)-bits] .. list[len(list)-bits] + // are encoded using "bits" bits, and get the values + // code, code + 1, .... The code values are + // assigned in literal order (not frequency order). + chunk := list[len(list)-int(bits):] + + sortByLiteral(chunk) + for _, node := range chunk { + h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) + code++ + } + list = list[0 : len(list)-int(bits)] + } +} + +// Update this Huffman Code object to be the minimum code for the specified frequency count. +// +// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. +// maxBits The maximum number of bits to use for any literal. +func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { + list := h.freqcache[:len(freq)+1] + codes := h.codes[:len(freq)] + // Number of non-zero literals + count := 0 + // Set list to be the set of all non-zero literals and their frequencies + for i, f := range freq { + if f != 0 { + list[count] = literalNode{uint16(i), f} + count++ + } else { + codes[i] = 0 + } + } + list[count] = literalNode{} + + list = list[:count] + if count <= 2 { + // Handle the small cases here, because they are awkward for the general case code. With + // two or fewer literals, everything has bit length 1. + for i, node := range list { + // "list" is in order of increasing literal value. + h.codes[node.literal].set(uint16(i), 1) + } + return + } + sortByFreq(list) + + // Get the number of literals for each bit count + bitCount := h.bitCounts(list, maxBits) + // And do the assignment + h.assignEncodingAndSize(bitCount, list) +} + +// atLeastOne clamps the result between 1 and 15. +func atLeastOne(v float32) float32 { + if v < 1 { + return 1 + } + if v > 15 { + return 15 + } + return v +} + +func histogram(b []byte, h []uint16) { + if true && len(b) >= 8<<10 { + // Split for bigger inputs + histogramSplit(b, h) + } else { + h = h[:256] + for _, t := range b { + h[t]++ + } + } +} + +func histogramSplit(b []byte, h []uint16) { + // Tested, and slightly faster than 2-way. + // Writing to separate arrays and combining is also slightly slower. 
+ h = h[:256] + for len(b)&3 != 0 { + h[b[0]]++ + b = b[1:] + } + n := len(b) / 4 + x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] + y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] + for i, t := range x { + v0 := &h[t] + v1 := &h[y[i]] + v3 := &h[w[i]] + v2 := &h[z[i]] + *v0++ + *v1++ + *v2++ + *v3++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go index 2077802990..9e6edba42c 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go @@ -1,178 +1,178 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// Sort sorts data. -// It makes one call to data.Len to determine n, and O(n*log(n)) calls to -// data.Less and data.Swap. The sort is not guaranteed to be stable. -func sortByFreq(data []literalNode) { - n := len(data) - quickSortByFreq(data, 0, n, maxDepth(n)) -} - -func quickSortByFreq(data []literalNode, a, b, maxDepth int) { - for b-a > 12 { // Use ShellSort for slices <= 12 elements - if maxDepth == 0 { - heapSort(data, a, b) - return - } - maxDepth-- - mlo, mhi := doPivotByFreq(data, a, b) - // Avoiding recursion on the larger subproblem guarantees - // a stack depth of at most lg(b-a). - if mlo-a < b-mhi { - quickSortByFreq(data, a, mlo, maxDepth) - a = mhi // i.e., quickSortByFreq(data, mhi, b) - } else { - quickSortByFreq(data, mhi, b, maxDepth) - b = mlo // i.e., quickSortByFreq(data, a, mlo) - } - } - if b-a > 1 { - // Do ShellSort pass with gap 6 - // It could be written in this simplified form cause b-a <= 12 - for i := a + 6; i < b; i++ { - if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { - data[i], data[i-6] = data[i-6], data[i] - } - } - insertionSortByFreq(data, a, b) - } -} - -// siftDownByFreq implements the heap property on data[lo, hi). -// first is an offset into the array where the root of the heap lies. -func siftDownByFreq(data []literalNode, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) { - child++ - } - if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} -func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { - m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. - if hi-lo > 40 { - // Tukey's ``Ninther,'' median of three medians of three. 
- s := (hi - lo) / 8 - medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) - medianOfThreeSortByFreq(data, m, m-s, m+s) - medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) - } - medianOfThreeSortByFreq(data, lo, m, hi-1) - - // Invariants are: - // data[lo] = pivot (set up by ChoosePivot) - // data[lo < i < a] < pivot - // data[a <= i < b] <= pivot - // data[b <= i < c] unexamined - // data[c <= i < hi-1] > pivot - // data[hi-1] >= pivot - pivot := lo - a, c := lo+1, hi-1 - - for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { - } - b := a - for { - for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot - } - for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot - } - if b >= c { - break - } - // data[b] > pivot; data[c-1] <= pivot - data[b], data[c-1] = data[c-1], data[b] - b++ - c-- - } - // If hi-c<3 then there are duplicates (by property of median of nine). - // Let's be a bit more conservative, and set border to 5. - protect := hi-c < 5 - if !protect && hi-c < (hi-lo)/4 { - // Lets test some points for equality to pivot - dups := 0 - if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot - data[c], data[hi-1] = data[hi-1], data[c] - c++ - dups++ - } - if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot - b-- - dups++ - } - // m-lo = (hi-lo)/2 > 6 - // b-lo > (hi-lo)*3/4-1 > 8 - // ==> m < b ==> data[m] <= pivot - if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot - data[m], data[b-1] = data[b-1], data[m] - b-- - dups++ - } - // if at least 2 points are equal to pivot, assume skewed distribution - protect = dups > 1 - } - if protect { - // Protect against a lot of duplicates - // Add invariant: - // data[a <= i < b] unexamined - // data[b <= i < c] = pivot - for { - for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot - } - for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot - } - if a >= b { - break - } - // data[a] == pivot; data[b-1] < pivot - data[a], data[b-1] = data[b-1], data[a] - a++ - b-- - } - } - // Swap pivot into middle - data[pivot], data[b-1] = data[b-1], data[pivot] - return b - 1, c -} - -// Insertion sort -func insertionSortByFreq(data []literalNode, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// quickSortByFreq, loosely following Bentley and McIlroy, -// ``Engineering a Sort Function,'' SP&E November 1993. - -// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. 
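`doPivotByFreq` above seeds its pivot with Tukey's ninther for slices longer than 40 elements: the median of three medians of three. A sketch of the same selection on plain ints, with `medianOfThree` mirroring the `medianOfThreeSortByFreq` helper:

```go
package main

import "fmt"

// medianOfThree leaves the median of s[m0], s[m1], s[m2] at m1, matching
// the medianOfThreeSortByFreq helper (but comparing plain ints).
func medianOfThree(s []int, m1, m0, m2 int) {
	if s[m1] < s[m0] {
		s[m1], s[m0] = s[m0], s[m1]
	}
	if s[m2] < s[m1] {
		s[m2], s[m1] = s[m1], s[m2]
		if s[m1] < s[m0] {
			s[m1], s[m0] = s[m0], s[m1]
		}
	}
}

// ninther seeds the pivot for [lo, hi) the way doPivotByFreq does for
// slices longer than 40 elements: three medians of three, then the
// median of those, leaving the chosen pivot at s[lo].
func ninther(s []int, lo, hi int) {
	m := int(uint(lo+hi) >> 1) // written like this to avoid overflow, as above
	step := (hi - lo) / 8
	medianOfThree(s, lo, lo+step, lo+2*step)
	medianOfThree(s, m, m-step, m+step)
	medianOfThree(s, hi-1, hi-1-step, hi-1-2*step)
	medianOfThree(s, lo, m, hi-1)
}

func main() {
	s := make([]int, 41)
	for i := range s {
		s[i] = (i*29 + 7) % 41 // a scrambled permutation of 0..40
	}
	ninther(s, 0, len(s))
	fmt.Println("pivot value:", s[0]) // typically lands near the true median, 20
}
```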
-func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { - // sort 3 elements - if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { - data[m1], data[m0] = data[m0], data[m1] - } - // data[m0] <= data[m1] - if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { - data[m2], data[m1] = data[m1], data[m2] - // data[m0] <= data[m2] && data[m1] < data[m2] - if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { - data[m1], data[m0] = data[m0], data[m1] - } - } - // now data[m0] <= data[m1] <= data[m2] -} +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. +func sortByFreq(data []literalNode) { + n := len(data) + quickSortByFreq(data, 0, n, maxDepth(n)) +} + +func quickSortByFreq(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivotByFreq(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSortByFreq(data, a, mlo, maxDepth) + a = mhi // i.e., quickSortByFreq(data, mhi, b) + } else { + quickSortByFreq(data, mhi, b, maxDepth) + b = mlo // i.e., quickSortByFreq(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSortByFreq(data, a, b) + } +} + +// siftDownByFreq implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDownByFreq(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) { + child++ + } + if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. 
+ s := (hi - lo) / 8 + medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) + medianOfThreeSortByFreq(data, m, m-s, m+s) + medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThreeSortByFreq(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { + } + b := a + for { + for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot + } + for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. + protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot + } + for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSortByFreq(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// quickSortByFreq, loosely following Bentley and McIlroy, +// ``Engineering a Sort Function,'' SP&E November 1993. + +// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. 
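All of the freq-sort comparisons above inline one boolean expression that orders nodes by `(freq, literal)` lexicographically. A sketch isolating that comparator (the `less` name is illustrative):

```go
package main

import (
	"fmt"
	"sort"
)

type literalNode struct {
	literal uint16
	freq    uint16
}

// less is the (freq, literal) lexicographic order that quickSortByFreq,
// siftDownByFreq and doPivotByFreq inline as a single boolean expression.
func less(a, b literalNode) bool {
	return a.freq == b.freq && a.literal < b.literal || a.freq < b.freq
}

func main() {
	nodes := []literalNode{{'b', 2}, {'a', 2}, {'c', 1}}
	sort.Slice(nodes, func(i, j int) bool { return less(nodes[i], nodes[j]) })
	for _, n := range nodes {
		fmt.Printf("%c freq=%d\n", n.literal, n.freq) // c, then a before b
	}
}
```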
+func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go index 93f1aea109..ffcc176441 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go @@ -1,201 +1,201 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// Sort sorts data. -// It makes one call to data.Len to determine n, and O(n*log(n)) calls to -// data.Less and data.Swap. The sort is not guaranteed to be stable. -func sortByLiteral(data []literalNode) { - n := len(data) - quickSort(data, 0, n, maxDepth(n)) -} - -func quickSort(data []literalNode, a, b, maxDepth int) { - for b-a > 12 { // Use ShellSort for slices <= 12 elements - if maxDepth == 0 { - heapSort(data, a, b) - return - } - maxDepth-- - mlo, mhi := doPivot(data, a, b) - // Avoiding recursion on the larger subproblem guarantees - // a stack depth of at most lg(b-a). - if mlo-a < b-mhi { - quickSort(data, a, mlo, maxDepth) - a = mhi // i.e., quickSort(data, mhi, b) - } else { - quickSort(data, mhi, b, maxDepth) - b = mlo // i.e., quickSort(data, a, mlo) - } - } - if b-a > 1 { - // Do ShellSort pass with gap 6 - // It could be written in this simplified form cause b-a <= 12 - for i := a + 6; i < b; i++ { - if data[i].literal < data[i-6].literal { - data[i], data[i-6] = data[i-6], data[i] - } - } - insertionSort(data, a, b) - } -} -func heapSort(data []literalNode, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDown(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDown(data, lo, i, first) - } -} - -// siftDown implements the heap property on data[lo, hi). -// first is an offset into the array where the root of the heap lies. -func siftDown(data []literalNode, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && data[first+child].literal < data[first+child+1].literal { - child++ - } - if data[first+root].literal > data[first+child].literal { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} -func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { - m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. - if hi-lo > 40 { - // Tukey's ``Ninther,'' median of three medians of three. 
- s := (hi - lo) / 8 - medianOfThree(data, lo, lo+s, lo+2*s) - medianOfThree(data, m, m-s, m+s) - medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) - } - medianOfThree(data, lo, m, hi-1) - - // Invariants are: - // data[lo] = pivot (set up by ChoosePivot) - // data[lo < i < a] < pivot - // data[a <= i < b] <= pivot - // data[b <= i < c] unexamined - // data[c <= i < hi-1] > pivot - // data[hi-1] >= pivot - pivot := lo - a, c := lo+1, hi-1 - - for ; a < c && data[a].literal < data[pivot].literal; a++ { - } - b := a - for { - for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot - } - for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot - } - if b >= c { - break - } - // data[b] > pivot; data[c-1] <= pivot - data[b], data[c-1] = data[c-1], data[b] - b++ - c-- - } - // If hi-c<3 then there are duplicates (by property of median of nine). - // Let's be a bit more conservative, and set border to 5. - protect := hi-c < 5 - if !protect && hi-c < (hi-lo)/4 { - // Lets test some points for equality to pivot - dups := 0 - if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot - data[c], data[hi-1] = data[hi-1], data[c] - c++ - dups++ - } - if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot - b-- - dups++ - } - // m-lo = (hi-lo)/2 > 6 - // b-lo > (hi-lo)*3/4-1 > 8 - // ==> m < b ==> data[m] <= pivot - if data[m].literal > data[pivot].literal { // data[m] = pivot - data[m], data[b-1] = data[b-1], data[m] - b-- - dups++ - } - // if at least 2 points are equal to pivot, assume skewed distribution - protect = dups > 1 - } - if protect { - // Protect against a lot of duplicates - // Add invariant: - // data[a <= i < b] unexamined - // data[b <= i < c] = pivot - for { - for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot - } - for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot - } - if a >= b { - break - } - // data[a] == pivot; data[b-1] < pivot - data[a], data[b-1] = data[b-1], data[a] - a++ - b-- - } - } - // Swap pivot into middle - data[pivot], data[b-1] = data[b-1], data[pivot] - return b - 1, c -} - -// Insertion sort -func insertionSort(data []literalNode, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && data[j].literal < data[j-1].literal; j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// maxDepth returns a threshold at which quicksort should switch -// to heapsort. It returns 2*ceil(lg(n+1)). -func maxDepth(n int) int { - var depth int - for i := n; i > 0; i >>= 1 { - depth++ - } - return depth * 2 -} - -// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. -func medianOfThree(data []literalNode, m1, m0, m2 int) { - // sort 3 elements - if data[m1].literal < data[m0].literal { - data[m1], data[m0] = data[m0], data[m1] - } - // data[m0] <= data[m1] - if data[m2].literal < data[m1].literal { - data[m2], data[m1] = data[m1], data[m2] - // data[m0] <= data[m2] && data[m1] < data[m2] - if data[m1].literal < data[m0].literal { - data[m1], data[m0] = data[m0], data[m1] - } - } - // now data[m0] <= data[m1] <= data[m2] -} +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// Sort sorts data. +// It makes one call to data.Len to determine n, and O(n*log(n)) calls to +// data.Less and data.Swap. The sort is not guaranteed to be stable. 
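The sort helpers in these two files form a small introsort: quicksort recurses on the smaller half, slices of 12 or fewer elements fall through to a Shell/insertion pass, and once the `maxDepth` budget is spent the slice is heap-sorted. A sketch of the heapsort fallback and the depth budget, on plain ints:

```go
package main

import "fmt"

// maxDepth is verbatim from the diff: 2*ceil(lg(n+1)), the recursion budget
// after which quickSort abandons partitioning and heap-sorts the slice.
func maxDepth(n int) int {
	var depth int
	for i := n; i > 0; i >>= 1 {
		depth++
	}
	return depth * 2
}

// siftDown restores the max-heap property below root, as the siftDown
// helper does for literalNodes.
func siftDown(s []int, root, hi int) {
	for {
		child := 2*root + 1
		if child >= hi {
			return
		}
		if child+1 < hi && s[child] < s[child+1] {
			child++
		}
		if s[root] >= s[child] {
			return
		}
		s[root], s[child] = s[child], s[root]
		root = child
	}
}

// heapSort is the fallback: build a max-heap, then repeatedly swap the
// maximum to the end of the unsorted region.
func heapSort(s []int) {
	hi := len(s)
	for i := (hi - 1) / 2; i >= 0; i-- {
		siftDown(s, i, hi)
	}
	for i := hi - 1; i > 0; i-- {
		s[0], s[i] = s[i], s[0]
		siftDown(s, 0, i)
	}
}

func main() {
	s := []int{5, 2, 9, 1, 7, 3}
	heapSort(s)
	fmt.Println(s, "depth budget for 1e6 elements:", maxDepth(1000000))
}
```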
+func sortByLiteral(data []literalNode) { + n := len(data) + quickSort(data, 0, n, maxDepth(n)) +} + +func quickSort(data []literalNode, a, b, maxDepth int) { + for b-a > 12 { // Use ShellSort for slices <= 12 elements + if maxDepth == 0 { + heapSort(data, a, b) + return + } + maxDepth-- + mlo, mhi := doPivot(data, a, b) + // Avoiding recursion on the larger subproblem guarantees + // a stack depth of at most lg(b-a). + if mlo-a < b-mhi { + quickSort(data, a, mlo, maxDepth) + a = mhi // i.e., quickSort(data, mhi, b) + } else { + quickSort(data, mhi, b, maxDepth) + b = mlo // i.e., quickSort(data, a, mlo) + } + } + if b-a > 1 { + // Do ShellSort pass with gap 6 + // It could be written in this simplified form cause b-a <= 12 + for i := a + 6; i < b; i++ { + if data[i].literal < data[i-6].literal { + data[i], data[i-6] = data[i-6], data[i] + } + } + insertionSort(data, a, b) + } +} +func heapSort(data []literalNode, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDown(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDown(data, lo, i, first) + } +} + +// siftDown implements the heap property on data[lo, hi). +// first is an offset into the array where the root of the heap lies. +func siftDown(data []literalNode, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && data[first+child].literal < data[first+child+1].literal { + child++ + } + if data[first+root].literal > data[first+child].literal { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} +func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { + m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. + if hi-lo > 40 { + // Tukey's ``Ninther,'' median of three medians of three. + s := (hi - lo) / 8 + medianOfThree(data, lo, lo+s, lo+2*s) + medianOfThree(data, m, m-s, m+s) + medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) + } + medianOfThree(data, lo, m, hi-1) + + // Invariants are: + // data[lo] = pivot (set up by ChoosePivot) + // data[lo < i < a] < pivot + // data[a <= i < b] <= pivot + // data[b <= i < c] unexamined + // data[c <= i < hi-1] > pivot + // data[hi-1] >= pivot + pivot := lo + a, c := lo+1, hi-1 + + for ; a < c && data[a].literal < data[pivot].literal; a++ { + } + b := a + for { + for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot + } + for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot + } + if b >= c { + break + } + // data[b] > pivot; data[c-1] <= pivot + data[b], data[c-1] = data[c-1], data[b] + b++ + c-- + } + // If hi-c<3 then there are duplicates (by property of median of nine). + // Let's be a bit more conservative, and set border to 5. 
+ protect := hi-c < 5 + if !protect && hi-c < (hi-lo)/4 { + // Lets test some points for equality to pivot + dups := 0 + if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot + data[c], data[hi-1] = data[hi-1], data[c] + c++ + dups++ + } + if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot + b-- + dups++ + } + // m-lo = (hi-lo)/2 > 6 + // b-lo > (hi-lo)*3/4-1 > 8 + // ==> m < b ==> data[m] <= pivot + if data[m].literal > data[pivot].literal { // data[m] = pivot + data[m], data[b-1] = data[b-1], data[m] + b-- + dups++ + } + // if at least 2 points are equal to pivot, assume skewed distribution + protect = dups > 1 + } + if protect { + // Protect against a lot of duplicates + // Add invariant: + // data[a <= i < b] unexamined + // data[b <= i < c] = pivot + for { + for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot + } + for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot + } + if a >= b { + break + } + // data[a] == pivot; data[b-1] < pivot + data[a], data[b-1] = data[b-1], data[a] + a++ + b-- + } + } + // Swap pivot into middle + data[pivot], data[b-1] = data[b-1], data[pivot] + return b - 1, c +} + +// Insertion sort +func insertionSort(data []literalNode, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && data[j].literal < data[j-1].literal; j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// maxDepth returns a threshold at which quicksort should switch +// to heapsort. It returns 2*ceil(lg(n+1)). +func maxDepth(n int) int { + var depth int + for i := n; i > 0; i >>= 1 { + depth++ + } + return depth * 2 +} + +// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. +func medianOfThree(data []literalNode, m1, m0, m2 int) { + // sort 3 elements + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + // data[m0] <= data[m1] + if data[m2].literal < data[m1].literal { + data[m2], data[m1] = data[m1], data[m2] + // data[m0] <= data[m2] && data[m1] < data[m2] + if data[m1].literal < data[m0].literal { + data[m1], data[m0] = data[m0], data[m1] + } + } + // now data[m0] <= data[m1] <= data[m2] +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 414c0bea9f..8a19d5edbb 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -1,793 +1,793 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package flate implements the DEFLATE compressed data format, described in -// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file -// formats. -package flate - -import ( - "bufio" - "compress/flate" - "fmt" - "io" - "math/bits" - "sync" -) - -const ( - maxCodeLen = 16 // max length of Huffman code - maxCodeLenMask = 15 // mask for max length of Huffman code - // The next three numbers come from the RFC section 3.2.7, with the - // additional proviso in section 3.2.5 which implies that distance codes - // 30 and 31 should never occur in compressed data. - maxNumLit = 286 - maxNumDist = 30 - numCodes = 19 // number of codes in Huffman meta-code - - debugDecode = false -) - -// Value of length - 3 and extra bits. 
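The `lengthExtra` table that follows maps each DEFLATE length code to a base match length (stored minus 3) plus a count of extra bits to read. A sketch of how a decoder turns a code and its extra bits back into a match length, using a few entries copied from the table; the `matchLength` helper is illustrative:

```go
package main

import "fmt"

// lengthExtra mirrors the table entry type below: the base match length
// minus 3, plus how many extra bits to read, per DEFLATE length code.
type lengthExtra struct {
	length, extra uint8
}

// A few entries copied from the decCodeToLen table in the diff
// (indexed by length code relative to symbol 257).
var decCodeToLen = map[int]lengthExtra{
	0:  {length: 0x0, extra: 0x0},  // symbol 257: length 3, no extra bits
	8:  {length: 0x8, extra: 0x1},  // symbol 265: lengths 11-12, 1 extra bit
	20: {length: 0x40, extra: 0x4}, // symbol 277: lengths 67-82, 4 extra bits
}

// matchLength reconstructs the final match length from a length code and
// the extra bits read from the stream.
func matchLength(code int, extraBits uint8) int {
	e := decCodeToLen[code]
	return int(e.length) + 3 + int(extraBits)
}

func main() {
	fmt.Println(matchLength(0, 0))  // 3
	fmt.Println(matchLength(8, 1))  // 12
	fmt.Println(matchLength(20, 9)) // 76
}
```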
-type lengthExtra struct { - length, extra uint8 -} - -var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// Initialize the fixedHuffmanDecoder only once upon first use. -var fixedOnce sync.Once -var fixedHuffmanDecoder huffmanDecoder - -// A CorruptInputError reports the presence of corrupt input at a given offset. -type CorruptInputError = flate.CorruptInputError - -// An InternalError reports an error in the flate code itself. -type InternalError string - -func (e InternalError) Error() string { return "flate: internal error: " + string(e) } - -// A ReadError reports an error encountered while reading input. -// -// Deprecated: No longer returned. -type ReadError = flate.ReadError - -// A WriteError reports an error encountered while writing output. -// -// Deprecated: No longer returned. -type WriteError = flate.WriteError - -// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to -// to switch to a new underlying Reader. This permits reusing a ReadCloser -// instead of allocating a new one. -type Resetter interface { - // Reset discards any buffered data and resets the Resetter as if it was - // newly initialized with the given reader. - Reset(r io.Reader, dict []byte) error -} - -// The data structure for decoding Huffman tables is based on that of -// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), -// For codes smaller than the table width, there are multiple entries -// (each combination of trailing bits has the same value). For codes -// larger than the table width, the table contains a link to an overflow -// table. The width of each entry in the link table is the maximum code -// size minus the chunk width. -// -// Note that you can do a lookup in the table even without all bits -// filled. Since the extra bits are zero, and the DEFLATE Huffman codes -// have the property that shorter codes come before longer ones, the -// bit length estimate in the result is a lower bound on the actual -// number of bits. 
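The comment above describes the decoder's two-level lookup: each `uint16` chunk packs the code width into its low 4 bits and the decoded value (or link-table index) into the rest. A tiny sketch of packing and unpacking one chunk with the constants defined just below:

```go
package main

import "fmt"

const (
	huffmanCountMask  = 15 // low 4 bits: number of bits in the code
	huffmanValueShift = 4  // remaining bits: decoded value or table link
)

// makeChunk packs a decoded value and its code width the way the
// huffmanDecoder tables do ("chunk & 15 is number of bits,
// chunk >> 4 is value").
func makeChunk(value uint16, width uint8) uint16 {
	return value<<huffmanValueShift | uint16(width)
}

func main() {
	chunk := makeChunk(257, 9) // e.g. literal/length symbol 257, 9-bit code
	fmt.Println("value:", chunk>>huffmanValueShift) // 257
	fmt.Println("width:", chunk&huffmanCountMask)   // 9
}
```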
-//
-// See the following:
-//	http://www.gzip.org/algorithm.txt
-
-// chunk & 15 is number of bits
-// chunk >> 4 is value, including table link
-
-const (
-	huffmanChunkBits  = 9
-	huffmanNumChunks  = 1 << huffmanChunkBits
-	huffmanCountMask  = 15
-	huffmanValueShift = 4
-)
-
-type huffmanDecoder struct {
-	maxRead  int                       // the maximum number of bits we can read and not overread
-	chunks   *[huffmanNumChunks]uint16 // chunks as described above
-	links    [][]uint16                // overflow links
-	linkMask uint32                    // mask the width of the link table
-}
-
-// Initialize Huffman decoding tables from array of code lengths.
-// Following this function, h is guaranteed to be initialized into a complete
-// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
-// degenerate case where the tree has only a single symbol with length 1. Empty
-// trees are permitted.
-func (h *huffmanDecoder) init(lengths []int) bool {
-	// Sanity enables additional runtime tests during Huffman
-	// table construction. It's intended to be used during
-	// development to supplement the currently ad-hoc unit tests.
-	const sanity = false
-
-	if h.chunks == nil {
-		h.chunks = &[huffmanNumChunks]uint16{}
-	}
-	if h.maxRead != 0 {
-		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
-	}
-
-	// Count number of codes of each length,
-	// compute maxRead and max length.
-	var count [maxCodeLen]int
-	var min, max int
-	for _, n := range lengths {
-		if n == 0 {
-			continue
-		}
-		if min == 0 || n < min {
-			min = n
-		}
-		if n > max {
-			max = n
-		}
-		count[n&maxCodeLenMask]++
-	}
-
-	// Empty tree. The decompressor.huffSym function will fail later if the tree
-	// is used. Technically, an empty tree is only valid for the HDIST tree and
-	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
-	// is guaranteed to fail since it will attempt to use the tree to decode the
-	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
-	// guaranteed to fail later since the compressed data section must be
-	// composed of at least one symbol (the end-of-block marker).
-	if max == 0 {
-		return true
-	}
-
-	code := 0
-	var nextcode [maxCodeLen]int
-	for i := min; i <= max; i++ {
-		code <<= 1
-		nextcode[i&maxCodeLenMask] = code
-		code += count[i&maxCodeLenMask]
-	}
-
-	// Check that the coding is complete (i.e., that we've
-	// assigned all 2-to-the-max possible bit sequences).
-	// Exception: To be compatible with zlib, we also need to
-	// accept degenerate single-code codings. See also
-	// TestDegenerateHuffmanCoding.
-	if code != 1<<uint(max) && !(code == 1 && max == 1) {
-		return false
-	}
-
-	h.maxRead = min
-
-	chunks := h.chunks[:]
-	for i := range chunks {
-		chunks[i] = 0
-	}
-
-	if max > huffmanChunkBits {
-		numLinks := 1 << (uint(max) - huffmanChunkBits)
-		h.linkMask = uint32(numLinks - 1)
-
-		// create link tables
-		link := nextcode[huffmanChunkBits+1] >> 1
-		if cap(h.links) < huffmanNumChunks-link {
-			h.links = make([][]uint16, huffmanNumChunks-link)
-		} else {
-			h.links = h.links[:huffmanNumChunks-link]
-		}
-		for j := uint(link); j < huffmanNumChunks; j++ {
-			reverse := int(bits.Reverse16(uint16(j)))
-			reverse >>= uint(16 - huffmanChunkBits)
-			off := j - uint(link)
-			if sanity && h.chunks[reverse] != 0 {
-				panic("impossible: overwriting existing chunk")
-			}
-			h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
-			if cap(h.links[off]) < numLinks {
-				h.links[off] = make([]uint16, numLinks)
-			} else {
-				h.links[off] = h.links[off][:numLinks]
-			}
-		}
-	} else {
-		h.links = h.links[:0]
-	}
-
-	for i, n := range lengths {
-		if n == 0 {
-			continue
-		}
-		code := nextcode[n]
-		nextcode[n]++
-		chunk := uint16(i<<huffmanValueShift | n)
-		reverse := int(bits.Reverse16(uint16(code)))
-		reverse >>= uint(16 - n)
-		if n <= huffmanChunkBits {
-			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
-				// We should never need to overwrite
-				// an existing chunk. Also, 0 is
-				// never a valid chunk, because the
-				// lower 4 "count" bits should be
-				// between 1 and 15.
- if sanity && h.chunks[off] != 0 { - panic("impossible: overwriting existing chunk") - } - h.chunks[off] = chunk - } - } else { - j := reverse & (huffmanNumChunks - 1) - if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { - // Longer codes should have been - // associated with a link table above. - panic("impossible: not an indirect chunk") - } - value := h.chunks[j] >> huffmanValueShift - linktab := h.links[value] - reverse >>= huffmanChunkBits - for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { - if sanity && linktab[off] != 0 { - panic("impossible: overwriting existing chunk") - } - linktab[off] = chunk - } - } - } - - if sanity { - // Above we've sanity checked that we never overwrote - // an existing entry. Here we additionally check that - // we filled the tables completely. - for i, chunk := range h.chunks { - if chunk == 0 { - // As an exception, in the degenerate - // single-code case, we allow odd - // chunks to be missing. - if code == 1 && i%2 == 1 { - continue - } - panic("impossible: missing chunk") - } - } - for _, linktab := range h.links { - for _, chunk := range linktab { - if chunk == 0 { - panic("impossible: missing chunk") - } - } - } - } - - return true -} - -// The actual read interface needed by NewReader. -// If the passed in io.Reader does not also have ReadByte, -// the NewReader will introduce its own buffering. -type Reader interface { - io.Reader - io.ByteReader -} - -// Decompress state. -type decompressor struct { - // Input source. - r Reader - roffset int64 - - // Huffman decoders for literal/length, distance. - h1, h2 huffmanDecoder - - // Length arrays used to define Huffman codes. - bits *[maxNumLit + maxNumDist]int - codebits *[numCodes]int - - // Output history, buffer. - dict dictDecoder - - // Next step in the decompression, - // and decompression state. - step func(*decompressor) - stepState int - err error - toRead []byte - hl, hd *huffmanDecoder - copyLen int - copyDist int - - // Temporary buffer (avoids repeated allocation). - buf [4]byte - - // Input bits, in top of b. - b uint32 - - nb uint - final bool -} - -func (f *decompressor) nextBlock() { - for f.nb < 1+2 { - if f.err = f.moreBits(); f.err != nil { - return - } - } - f.final = f.b&1 == 1 - f.b >>= 1 - typ := f.b & 3 - f.b >>= 2 - f.nb -= 1 + 2 - switch typ { - case 0: - f.dataBlock() - if debugDecode { - fmt.Println("stored block") - } - case 1: - // compressed, fixed Huffman tables - f.hl = &fixedHuffmanDecoder - f.hd = nil - f.huffmanBlockDecoder()() - if debugDecode { - fmt.Println("predefinied huffman block") - } - case 2: - // compressed, dynamic Huffman tables - if f.err = f.readHuffman(); f.err != nil { - break - } - f.hl = &f.h1 - f.hd = &f.h2 - f.huffmanBlockDecoder()() - if debugDecode { - fmt.Println("dynamic huffman block") - } - default: - // 3 is reserved. - if debugDecode { - fmt.Println("reserved data block encountered") - } - f.err = CorruptInputError(f.roffset) - } -} - -func (f *decompressor) Read(b []byte) (int, error) { - for { - if len(f.toRead) > 0 { - n := copy(b, f.toRead) - f.toRead = f.toRead[n:] - if len(f.toRead) == 0 { - return n, f.err - } - return n, nil - } - if f.err != nil { - return 0, f.err - } - f.step(f) - if f.err != nil && len(f.toRead) == 0 { - f.toRead = f.dict.readFlush() // Flush what's left in case of error - } - } -} - -// Support the io.WriteTo interface for io.Copy and friends. 
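Because the decompressor implements `io.WriterTo` (the method follows), `io.Copy` streams decompressed output straight to its destination with no intermediate copy buffer. A usage sketch against the vendored package's public API, with an arbitrary sample payload:

```go
package main

import (
	"bytes"
	"io"
	"os"
	"strings"

	"github.com/klauspost/compress/flate"
)

func main() {
	// Compress a sample payload.
	var compressed bytes.Buffer
	zw, err := flate.NewWriter(&compressed, flate.BestSpeed)
	if err != nil {
		panic(err)
	}
	io.Copy(zw, strings.NewReader(strings.Repeat("notely ", 100)))
	zw.Close()

	// io.Copy detects the decompressor's WriteTo and streams directly.
	zr := flate.NewReader(&compressed)
	defer zr.Close()
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		panic(err)
	}
}
```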
-func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
-	total := int64(0)
-	flushed := false
-	for {
-		if len(f.toRead) > 0 {
-			n, err := w.Write(f.toRead)
-			total += int64(n)
-			if err != nil {
-				f.err = err
-				return total, err
-			}
-			if n != len(f.toRead) {
-				return total, io.ErrShortWrite
-			}
-			f.toRead = f.toRead[:0]
-		}
-		if f.err != nil && flushed {
-			if f.err == io.EOF {
-				return total, nil
-			}
-			return total, f.err
-		}
-		if f.err == nil {
-			f.step(f)
-		}
-		if len(f.toRead) == 0 && f.err != nil && !flushed {
-			f.toRead = f.dict.readFlush() // Flush what's left in case of error
-			flushed = true
-		}
-	}
-}
-
-func (f *decompressor) Close() error {
-	if f.err == io.EOF {
-		return nil
-	}
-	return f.err
-}
-
-// RFC 1951 section 3.2.7.
-// Compression with dynamic Huffman codes
-
-var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
-
-func (f *decompressor) readHuffman() error {
-	// HLIT[5], HDIST[5], HCLEN[4].
-	for f.nb < 5+5+4 {
-		if err := f.moreBits(); err != nil {
-			return err
-		}
-	}
-	nlit := int(f.b&0x1F) + 257
-	if nlit > maxNumLit {
-		if debugDecode {
-			fmt.Println("nlit > maxNumLit", nlit)
-		}
-		return CorruptInputError(f.roffset)
-	}
-	f.b >>= 5
-	ndist := int(f.b&0x1F) + 1
-	if ndist > maxNumDist {
-		if debugDecode {
-			fmt.Println("ndist > maxNumDist", ndist)
-		}
-		return CorruptInputError(f.roffset)
-	}
-	f.b >>= 5
-	nclen := int(f.b&0xF) + 4
-	// numCodes is 19, so nclen is always valid.
-	f.b >>= 4
-	f.nb -= 5 + 5 + 4
-
-	// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
-	for i := 0; i < nclen; i++ {
-		for f.nb < 3 {
-			if err := f.moreBits(); err != nil {
-				return err
-			}
-		}
-		f.codebits[codeOrder[i]] = int(f.b & 0x7)
-		f.b >>= 3
-		f.nb -= 3
-	}
-	for i := nclen; i < len(codeOrder); i++ {
-		f.codebits[codeOrder[i]] = 0
-	}
-	if !f.h1.init(f.codebits[0:]) {
-		if debugDecode {
-			fmt.Println("init codebits failed")
-		}
-		return CorruptInputError(f.roffset)
-	}
-
-	// HLIT + 257 code lengths, HDIST + 1 code lengths,
-	// using the code length Huffman code.
-	for i, n := 0, nlit+ndist; i < n; {
-		x, err := f.huffSym(&f.h1)
-		if err != nil {
-			return err
-		}
-		if x < 16 {
-			// Actual length.
-			f.bits[i] = x
-			i++
-			continue
-		}
-		// Repeat previous length or zero.
-		var rep int
-		var nb uint
-		var b int
-		switch x {
-		default:
-			return InternalError("unexpected length code")
-		case 16:
-			rep = 3
-			nb = 2
-			if i == 0 {
-				if debugDecode {
-					fmt.Println("i==0")
-				}
-				return CorruptInputError(f.roffset)
-			}
-			b = f.bits[i-1]
-		case 17:
-			rep = 3
-			nb = 3
-			b = 0
-		case 18:
-			rep = 11
-			nb = 7
-			b = 0
-		}
-		for f.nb < nb {
-			if err := f.moreBits(); err != nil {
-				if debugDecode {
-					fmt.Println("morebits:", err)
-				}
-				return err
-			}
-		}
-		rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
-		f.b >>= nb & regSizeMaskUint32
-		f.nb -= nb
-		if i+rep > n {
-			if debugDecode {
-				fmt.Println("i+rep > n", i, rep, n)
-			}
-			return CorruptInputError(f.roffset)
-		}
-		for j := 0; j < rep; j++ {
-			f.bits[i] = b
-			i++
-		}
-	}
-
-	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
-		if debugDecode {
-			fmt.Println("init2 failed")
-		}
-		return CorruptInputError(f.roffset)
-	}
-
-	// As an optimization, we can initialize the maxRead bits to read at a time
-	// for the HLIT tree to the length of the EOB marker since we know that
-	// every block must terminate with one. This preserves the property that
-	// we never read any extra bytes after the end of the DEFLATE stream.
- if f.h1.maxRead < f.bits[endBlockMarker] { - f.h1.maxRead = f.bits[endBlockMarker] - } - if !f.final { - // If not the final block, the smallest block possible is - // a predefined table, BTYPE=01, with a single EOB marker. - // This will take up 3 + 7 bits. - f.h1.maxRead += 10 - } - - return nil -} - -// Copy a single uncompressed data block from input to output. -func (f *decompressor) dataBlock() { - // Uncompressed. - // Discard current half-byte. - left := (f.nb) & 7 - f.nb -= left - f.b >>= left - - offBytes := f.nb >> 3 - // Unfilled values will be overwritten. - f.buf[0] = uint8(f.b) - f.buf[1] = uint8(f.b >> 8) - f.buf[2] = uint8(f.b >> 16) - f.buf[3] = uint8(f.b >> 24) - - f.roffset += int64(offBytes) - f.nb, f.b = 0, 0 - - // Length then ones-complement of length. - nr, err := io.ReadFull(f.r, f.buf[offBytes:4]) - f.roffset += int64(nr) - if err != nil { - f.err = noEOF(err) - return - } - n := uint16(f.buf[0]) | uint16(f.buf[1])<<8 - nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8 - if nn != ^n { - if debugDecode { - ncomp := ^n - fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp) - } - f.err = CorruptInputError(f.roffset) - return - } - - if n == 0 { - f.toRead = f.dict.readFlush() - f.finishBlock() - return - } - - f.copyLen = int(n) - f.copyData() -} - -// copyData copies f.copyLen bytes from the underlying reader into f.hist. -// It pauses for reads when f.hist is full. -func (f *decompressor) copyData() { - buf := f.dict.writeSlice() - if len(buf) > f.copyLen { - buf = buf[:f.copyLen] - } - - cnt, err := io.ReadFull(f.r, buf) - f.roffset += int64(cnt) - f.copyLen -= cnt - f.dict.writeMark(cnt) - if err != nil { - f.err = noEOF(err) - return - } - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).copyData - return - } - f.finishBlock() -} - -func (f *decompressor) finishBlock() { - if f.final { - if f.dict.availRead() > 0 { - f.toRead = f.dict.readFlush() - } - f.err = io.EOF - } - f.step = (*decompressor).nextBlock -} - -// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. -func noEOF(e error) error { - if e == io.EOF { - return io.ErrUnexpectedEOF - } - return e -} - -func (f *decompressor) moreBits() error { - c, err := f.r.ReadByte() - if err != nil { - return noEOF(err) - } - f.roffset++ - f.b |= uint32(c) << (f.nb & regSizeMaskUint32) - f.nb += 8 - return nil -} - -// Read the next Huffman-encoded symbol from f according to h. -func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(h.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
- nb, b := f.nb, f.b - for { - for nb < n { - c, err := f.r.ReadByte() - if err != nil { - f.b = b - f.nb = nb - return 0, noEOF(err) - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := h.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return 0, f.err - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - return int(chunk >> huffmanValueShift), nil - } - } -} - -func makeReader(r io.Reader) Reader { - if rr, ok := r.(Reader); ok { - return rr - } - return bufio.NewReader(r) -} - -func fixedHuffmanDecoderInit() { - fixedOnce.Do(func() { - // These come from the RFC section 3.2.6. - var bits [288]int - for i := 0; i < 144; i++ { - bits[i] = 8 - } - for i := 144; i < 256; i++ { - bits[i] = 9 - } - for i := 256; i < 280; i++ { - bits[i] = 7 - } - for i := 280; i < 288; i++ { - bits[i] = 8 - } - fixedHuffmanDecoder.init(bits[:]) - }) -} - -func (f *decompressor) Reset(r io.Reader, dict []byte) error { - *f = decompressor{ - r: makeReader(r), - bits: f.bits, - codebits: f.codebits, - h1: f.h1, - h2: f.h2, - dict: f.dict, - step: (*decompressor).nextBlock, - } - f.dict.init(maxMatchOffset, dict) - return nil -} - -// NewReader returns a new ReadCloser that can be used -// to read the uncompressed version of r. -// If r does not also implement io.ByteReader, -// the decompressor may read more data than necessary from r. -// It is the caller's responsibility to call Close on the ReadCloser -// when finished reading. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReader(r io.Reader) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = (*decompressor).nextBlock - f.dict.init(maxMatchOffset, nil) - return &f -} - -// NewReaderDict is like NewReader but initializes the reader -// with a preset dictionary. The returned Reader behaves as if -// the uncompressed data stream started with the given dictionary, -// which has already been read. NewReaderDict is typically used -// to read data compressed by NewWriterDict. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = (*decompressor).nextBlock - f.dict.init(maxMatchOffset, dict) - return &f -} +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flate implements the DEFLATE compressed data format, described in +// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file +// formats. 
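+//
+// A minimal decompression sketch (src and dst are assumed io.Reader and
+// io.Writer values, not part of this package):
+//
+//	zr := NewReader(src)
+//	defer zr.Close()
+//	if _, err := io.Copy(dst, zr); err != nil {
+//		// handle the error
+//	}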
+package flate + +import ( + "bufio" + "compress/flate" + "fmt" + "io" + "math/bits" + "sync" +) + +const ( + maxCodeLen = 16 // max length of Huffman code + maxCodeLenMask = 15 // mask for max length of Huffman code + // The next three numbers come from the RFC section 3.2.7, with the + // additional proviso in section 3.2.5 which implies that distance codes + // 30 and 31 should never occur in compressed data. + maxNumLit = 286 + maxNumDist = 30 + numCodes = 19 // number of codes in Huffman meta-code + + debugDecode = false +) + +// Value of length - 3 and extra bits. +type lengthExtra struct { + length, extra uint8 +} + +var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// Initialize the fixedHuffmanDecoder only once upon first use. +var fixedOnce sync.Once +var fixedHuffmanDecoder huffmanDecoder + +// A CorruptInputError reports the presence of corrupt input at a given offset. +type CorruptInputError = flate.CorruptInputError + +// An InternalError reports an error in the flate code itself. +type InternalError string + +func (e InternalError) Error() string { return "flate: internal error: " + string(e) } + +// A ReadError reports an error encountered while reading input. +// +// Deprecated: No longer returned. +type ReadError = flate.ReadError + +// A WriteError reports an error encountered while writing output. +// +// Deprecated: No longer returned. +type WriteError = flate.WriteError + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// The data structure for decoding Huffman tables is based on that of +// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), +// For codes smaller than the table width, there are multiple entries +// (each combination of trailing bits has the same value). For codes +// larger than the table width, the table contains a link to an overflow +// table. The width of each entry in the link table is the maximum code +// size minus the chunk width. +// +// Note that you can do a lookup in the table even without all bits +// filled. 
Since the extra bits are zero, and the DEFLATE Huffman codes +// have the property that shorter codes come before longer ones, the +// bit length estimate in the result is a lower bound on the actual +// number of bits. +// +// See the following: +// http://www.gzip.org/algorithm.txt + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + maxRead int // the maximum number of bits we can read and not overread + chunks *[huffmanNumChunks]uint16 // chunks as described above + links [][]uint16 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(lengths []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.chunks == nil { + h.chunks = &[huffmanNumChunks]uint16{} + } + if h.maxRead != 0 { + *h = huffmanDecoder{chunks: h.chunks, links: h.links} + } + + // Count number of codes of each length, + // compute maxRead and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range lengths { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n&maxCodeLenMask]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i&maxCodeLenMask] = code + code += count[i&maxCodeLenMask] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. 
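+	// Worked example: for lengths {2, 2, 3, 3, 3, 3} the loop above leaves
+	// nextcode[2] = 0b00 (codes 00, 01) and nextcode[3] = 0b100
+	// (codes 100, 101, 110, 111), and code finishes at 8 == 1<<3,
+	// so the completeness check below passes.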
+	if code != 1<<uint(max) && !(code == 1 && max == 1) {
+		if debugDecode {
+			fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), max == 1, code == 1)
+		}
+		return false
+	}
+
+	h.maxRead = min
+	chunks := h.chunks[:]
+	for i := range chunks {
+		chunks[i] = 0
+	}
+
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		if cap(h.links) < huffmanNumChunks-link {
+			h.links = make([][]uint16, huffmanNumChunks-link)
+		} else {
+			h.links = h.links[:huffmanNumChunks-link]
+		}
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(bits.Reverse16(uint16(j)))
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			if cap(h.links[off]) < numLinks {
+				h.links[off] = make([]uint16, numLinks)
+			} else {
+				h.links[off] = h.links[off][:numLinks]
+			}
+		}
+	} else {
+		h.links = h.links[:0]
+	}
+
+	for i, n := range lengths {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint16(i<<huffmanValueShift | n)
+		reverse := int(bits.Reverse16(uint16(code)))
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk. Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+				if sanity && h.chunks[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				h.chunks[off] = chunk
+			}
+		} else {
+			j := reverse & (huffmanNumChunks - 1)
+			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+				// Longer codes should have been
+				// associated with a link table above.
+				panic("impossible: not an indirect chunk")
+			}
+			value := h.chunks[j] >> huffmanValueShift
+			linktab := h.links[value]
+			reverse >>= huffmanChunkBits
+			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+				if sanity && linktab[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				linktab[off] = chunk
+			}
+		}
+	}
+
+	if sanity {
+		// Above we've sanity checked that we never overwrote
+		// an existing entry. Here we additionally check that
+		// we filled the tables completely.
+		for i, chunk := range h.chunks {
+			if chunk == 0 {
+				// As an exception, in the degenerate
+				// single-code case, we allow odd
+				// chunks to be missing.
+				if code == 1 && i%2 == 1 {
+					continue
+				}
+				panic("impossible: missing chunk")
+			}
+		}
+		for _, linktab := range h.links {
+			for _, chunk := range linktab {
+				if chunk == 0 {
+					panic("impossible: missing chunk")
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+// The actual read interface needed by NewReader.
+// If the passed in io.Reader does not also have ReadByte,
+// the NewReader will introduce its own buffering.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+}
+
+// Decompress state.
+type decompressor struct {
+	// Input source.
+	r       Reader
+	roffset int64
+
+	// Huffman decoders for literal/length, distance.
+	h1, h2 huffmanDecoder
+
+	// Length arrays used to define Huffman codes.
+	bits     *[maxNumLit + maxNumDist]int
+	codebits *[numCodes]int
+
+	// Output history, buffer.
+	dict dictDecoder
+
+	// Next step in the decompression,
+	// and decompression state.
+	step      func(*decompressor)
+	stepState int
+	err       error
+	toRead    []byte
+	hl, hd    *huffmanDecoder
+	copyLen   int
+	copyDist  int
+
+	// Temporary buffer (avoids repeated allocation).
+	buf [4]byte
+
+	// Input bits, in top of b.
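+	// New bytes are OR'ed in above the nb bits already held, and bits are
+	// consumed from the bottom: reading 0xA5 into an empty buffer gives
+	// b == 0x000000A5 and nb == 8, and the next byte lands at bits 8..15.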
+ b uint32 + + nb uint + final bool +} + +func (f *decompressor) nextBlock() { + for f.nb < 1+2 { + if f.err = f.moreBits(); f.err != nil { + return + } + } + f.final = f.b&1 == 1 + f.b >>= 1 + typ := f.b & 3 + f.b >>= 2 + f.nb -= 1 + 2 + switch typ { + case 0: + f.dataBlock() + if debugDecode { + fmt.Println("stored block") + } + case 1: + // compressed, fixed Huffman tables + f.hl = &fixedHuffmanDecoder + f.hd = nil + f.huffmanBlockDecoder()() + if debugDecode { + fmt.Println("predefinied huffman block") + } + case 2: + // compressed, dynamic Huffman tables + if f.err = f.readHuffman(); f.err != nil { + break + } + f.hl = &f.h1 + f.hd = &f.h2 + f.huffmanBlockDecoder()() + if debugDecode { + fmt.Println("dynamic huffman block") + } + default: + // 3 is reserved. + if debugDecode { + fmt.Println("reserved data block encountered") + } + f.err = CorruptInputError(f.roffset) + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + for { + if len(f.toRead) > 0 { + n := copy(b, f.toRead) + f.toRead = f.toRead[n:] + if len(f.toRead) == 0 { + return n, f.err + } + return n, nil + } + if f.err != nil { + return 0, f.err + } + f.step(f) + if f.err != nil && len(f.toRead) == 0 { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + } + } +} + +// Support the io.WriteTo interface for io.Copy and friends. +func (f *decompressor) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + flushed := false + for { + if len(f.toRead) > 0 { + n, err := w.Write(f.toRead) + total += int64(n) + if err != nil { + f.err = err + return total, err + } + if n != len(f.toRead) { + return total, io.ErrShortWrite + } + f.toRead = f.toRead[:0] + } + if f.err != nil && flushed { + if f.err == io.EOF { + return total, nil + } + return total, f.err + } + if f.err == nil { + f.step(f) + } + if len(f.toRead) == 0 && f.err != nil && !flushed { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + flushed = true + } + } +} + +func (f *decompressor) Close() error { + if f.err == io.EOF { + return nil + } + return f.err +} + +// RFC 1951 section 3.2.7. +// Compression with dynamic Huffman codes + +var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +func (f *decompressor) readHuffman() error { + // HLIT[5], HDIST[5], HCLEN[4]. + for f.nb < 5+5+4 { + if err := f.moreBits(); err != nil { + return err + } + } + nlit := int(f.b&0x1F) + 257 + if nlit > maxNumLit { + if debugDecode { + fmt.Println("nlit > maxNumLit", nlit) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + ndist := int(f.b&0x1F) + 1 + if ndist > maxNumDist { + if debugDecode { + fmt.Println("ndist > maxNumDist", ndist) + } + return CorruptInputError(f.roffset) + } + f.b >>= 5 + nclen := int(f.b&0xF) + 4 + // numCodes is 19, so nclen is always valid. + f.b >>= 4 + f.nb -= 5 + 5 + 4 + + // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. + for i := 0; i < nclen; i++ { + for f.nb < 3 { + if err := f.moreBits(); err != nil { + return err + } + } + f.codebits[codeOrder[i]] = int(f.b & 0x7) + f.b >>= 3 + f.nb -= 3 + } + for i := nclen; i < len(codeOrder); i++ { + f.codebits[codeOrder[i]] = 0 + } + if !f.h1.init(f.codebits[0:]) { + if debugDecode { + fmt.Println("init codebits failed") + } + return CorruptInputError(f.roffset) + } + + // HLIT + 257 code lengths, HDIST + 1 code lengths, + // using the code length Huffman code. + for i, n := 0, nlit+ndist; i < n; { + x, err := f.huffSym(&f.h1) + if err != nil { + return err + } + if x < 16 { + // Actual length. 
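+				// Code-length symbols 0..15 are literal lengths;
+				// the repeat codes 16, 17 and 18 are handled
+				// below (RFC 1951 section 3.2.7).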
+			f.bits[i] = x
+			i++
+			continue
+		}
+		// Repeat previous length or zero.
+		var rep int
+		var nb uint
+		var b int
+		switch x {
+		default:
+			return InternalError("unexpected length code")
+		case 16:
+			rep = 3
+			nb = 2
+			if i == 0 {
+				if debugDecode {
+					fmt.Println("i==0")
+				}
+				return CorruptInputError(f.roffset)
+			}
+			b = f.bits[i-1]
+		case 17:
+			rep = 3
+			nb = 3
+			b = 0
+		case 18:
+			rep = 11
+			nb = 7
+			b = 0
+		}
+		for f.nb < nb {
+			if err := f.moreBits(); err != nil {
+				if debugDecode {
+					fmt.Println("morebits:", err)
+				}
+				return err
+			}
+		}
+		rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
+		f.b >>= nb & regSizeMaskUint32
+		f.nb -= nb
+		if i+rep > n {
+			if debugDecode {
+				fmt.Println("i+rep > n", i, rep, n)
+			}
+			return CorruptInputError(f.roffset)
+		}
+		for j := 0; j < rep; j++ {
+			f.bits[i] = b
+			i++
+		}
+	}
+
+	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+		if debugDecode {
+			fmt.Println("init2 failed")
+		}
+		return CorruptInputError(f.roffset)
+	}
+
+	// As an optimization, we can initialize the maxRead bits to read at a time
+	// for the HLIT tree to the length of the EOB marker since we know that
+	// every block must terminate with one. This preserves the property that
+	// we never read any extra bytes after the end of the DEFLATE stream.
+	if f.h1.maxRead < f.bits[endBlockMarker] {
+		f.h1.maxRead = f.bits[endBlockMarker]
+	}
+	if !f.final {
+		// If not the final block, the smallest block possible is
+		// a predefined table, BTYPE=01, with a single EOB marker.
+		// This will take up 3 + 7 bits.
+		f.h1.maxRead += 10
+	}
+
+	return nil
+}
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+	// Uncompressed.
+	// Discard current half-byte.
+	left := (f.nb) & 7
+	f.nb -= left
+	f.b >>= left
+
+	offBytes := f.nb >> 3
+	// Unfilled values will be overwritten.
+	f.buf[0] = uint8(f.b)
+	f.buf[1] = uint8(f.b >> 8)
+	f.buf[2] = uint8(f.b >> 16)
+	f.buf[3] = uint8(f.b >> 24)
+
+	f.roffset += int64(offBytes)
+	f.nb, f.b = 0, 0
+
+	// Length then ones-complement of length.
+	nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
+	f.roffset += int64(nr)
+	if err != nil {
+		f.err = noEOF(err)
+		return
+	}
+	n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
+	nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
+	if nn != ^n {
+		if debugDecode {
+			ncomp := ^n
+			fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
+		}
+		f.err = CorruptInputError(f.roffset)
+		return
+	}
+
+	if n == 0 {
+		f.toRead = f.dict.readFlush()
+		f.finishBlock()
+		return
+	}
+
+	f.copyLen = int(n)
+	f.copyData()
+}
+
+// copyData copies f.copyLen bytes from the underlying reader into f.hist.
+// It pauses for reads when f.hist is full.
+func (f *decompressor) copyData() {
+	buf := f.dict.writeSlice()
+	if len(buf) > f.copyLen {
+		buf = buf[:f.copyLen]
+	}
+
+	cnt, err := io.ReadFull(f.r, buf)
+	f.roffset += int64(cnt)
+	f.copyLen -= cnt
+	f.dict.writeMark(cnt)
+	if err != nil {
+		f.err = noEOF(err)
+		return
+	}
+
+	if f.dict.availWrite() == 0 || f.copyLen > 0 {
+		f.toRead = f.dict.readFlush()
+		f.step = (*decompressor).copyData
+		return
+	}
+	f.finishBlock()
+}
+
+func (f *decompressor) finishBlock() {
+	if f.final {
+		if f.dict.availRead() > 0 {
+			f.toRead = f.dict.readFlush()
+		}
+		f.err = io.EOF
+	}
+	f.step = (*decompressor).nextBlock
+}
+
+// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
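+// This keeps Read's contract intact: a stream truncated mid-block surfaces as
+// io.ErrUnexpectedEOF, while io.EOF is reserved for input that ends cleanly
+// after the final block.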
+func noEOF(e error) error { + if e == io.EOF { + return io.ErrUnexpectedEOF + } + return e +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + return noEOF(err) + } + f.roffset++ + f.b |= uint32(c) << (f.nb & regSizeMaskUint32) + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + nb, b := f.nb, f.b + for { + for nb < n { + c, err := f.r.ReadByte() + if err != nil { + f.b = b + f.nb = nb + return 0, noEOF(err) + } + f.roffset++ + b |= uint32(c) << (nb & regSizeMaskUint32) + nb += 8 + } + chunk := h.chunks[b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= nb { + if n == 0 { + f.b = b + f.nb = nb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b = b >> (n & regSizeMaskUint32) + f.nb = nb - n + return int(chunk >> huffmanValueShift), nil + } + } +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func fixedHuffmanDecoderInit() { + fixedOnce.Do(func() { + // These come from the RFC section 3.2.6. + var bits [288]int + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + fixedHuffmanDecoder.init(bits[:]) + }) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + h1: f.h1, + h2: f.h2, + dict: f.dict, + step: (*decompressor).nextBlock, + } + f.dict.init(maxMatchOffset, dict) + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, nil) + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. 
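+//
+// A minimal usage sketch (compressedSrc and presetDict are assumed values,
+// not part of this package):
+//
+//	zr := NewReaderDict(compressedSrc, presetDict)
+//	defer zr.Close()
+//	out, err := io.ReadAll(zr)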
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, dict) + return &f +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go index 61342b6b88..57fbf3958a 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go @@ -1,1283 +1,1283 @@ -// Code generated by go generate gen_inflate.go. DO NOT EDIT. - -package flate - -import ( - "bufio" - "bytes" - "fmt" - "math/bits" - "strings" -) - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBytesBuffer() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bytes.Buffer) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesBuffer - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
- extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBytesReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bytes.Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
- extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanBufioReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bufio.Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBufioReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
- extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBufioReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanStringsReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*strings.Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanStringsReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
- extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanStringsReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. -func (f *decompressor) huffmanGenericReader() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(Reader) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. 
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanGenericReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
- extra := (dist & 1) << (nb & regSizeMaskUint32) - for fnb < nb { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb>= nb & regSizeMaskUint32 - fnb -= nb - dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra - // slower: dist = bitMask32[nb+1] + 2 + extra - default: - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist too big:", dist, maxNumDist) - } - f.err = CorruptInputError(f.roffset) - return - } - - // No check on length; encoding can be prescient. - if dist > uint32(dict.histSize()) { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("dist > dict.histSize():", dist, dict.histSize()) - } - f.err = CorruptInputError(f.roffset) - return - } - - f.copyLen, f.copyDist = length, int(dist) - goto copyHistory - } - -copyHistory: - // Perform a backwards copy according to RFC section 3.2.3. - { - cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) - if cnt == 0 { - cnt = dict.writeCopy(f.copyDist, f.copyLen) - } - f.copyLen -= cnt - - if dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanGenericReader // We need to continue this work - f.stepState = stateDict - f.b, f.nb = fb, fnb - return - } - goto readLiteral - } - // Not reached -} - -func (f *decompressor) huffmanBlockDecoder() func() { - switch f.r.(type) { - case *bytes.Buffer: - return f.huffmanBytesBuffer - case *bytes.Reader: - return f.huffmanBytesReader - case *bufio.Reader: - return f.huffmanBufioReader - case *strings.Reader: - return f.huffmanStringsReader - case Reader: - return f.huffmanGenericReader - default: - return f.huffmanGenericReader - } -} +// Code generated by go generate gen_inflate.go. DO NOT EDIT. + +package flate + +import ( + "bufio" + "bytes" + "fmt" + "math/bits" + "strings" +) + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBytesBuffer() { + const ( + stateInit = iota // Zero value must be stateInit + stateDict + ) + fr := f.r.(*bytes.Buffer) + + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + fnb, fb, dict := f.nb, f.b, &f.dict + + switch f.stepState { + case stateInit: + goto readLiteral + case stateDict: + goto copyHistory + } + +readLiteral: + // Read literal and/or (length, distance) according to RFC section 3.2.3. + { + var v int + { + // Inlined v, err := f.huffSym(f.hl) + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. 
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanBytesBuffer + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
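Every regenerated reader resolves symbols through the same two-level table: a primary `chunks` array indexed by the low bits of the bit buffer, each entry packing a code length and a value, with overlong codes redirected into a `links` sub-table (omitted below). A toy sketch of the packing idea, using illustrative shift widths rather than the library's actual `huffmanChunkBits`/`huffmanValueShift` constants:

```go
package main

import "fmt"

// Each table entry packs (value << valueShift) | codeLength, echoing the
// decoder's huffmanValueShift/huffmanCountMask packing; the widths here
// are illustrative, not the library's actual constants.
const (
	countMask  = 0xF
	valueShift = 4
)

func chunk(value uint16, length uint16) uint16 {
	return value<<valueShift | length
}

func main() {
	// A degenerate 3-symbol alphabet, stored bit-reversed because the
	// decoder's buffer is filled least-significant-bit first. The 1-bit
	// code repeats at stride 2 so any 2-bit index resolves it.
	table := [4]uint16{
		0b00: chunk('A', 1),
		0b10: chunk('A', 1),
		0b01: chunk('B', 2),
		0b11: chunk('C', 2),
	}
	buf := uint32(0b11_01_0) // symbols A, B, C packed LSB first
	for i := 0; i < 3; i++ {
		c := table[buf&3]
		fmt.Printf("%c", rune(c>>valueShift))
		buf >>= uint(c & countMask) // consume exactly the code's bits
	}
	fmt.Println() // prints ABC
}
```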
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBytesReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bytes.Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
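The `copyHistory` block above relies on `dict.writeCopy` honoring RFC 1951 §3.2.3 semantics: the copy source may overlap the bytes currently being produced whenever length exceeds distance. A minimal sketch of that overlap rule using plain slices (the library's `dict` is a ring buffer, not shown here):

```go
package main

import "fmt"

// writeCopy appends length bytes copied from dist bytes back in out.
// A byte-at-a-time loop handles the overlapping case (length > dist),
// which repeats the trailing dist bytes -- dist == 1 degenerates to
// run-length encoding.
func writeCopy(out []byte, dist, length int) []byte {
	start := len(out) - dist
	for i := 0; i < length; i++ {
		out = append(out, out[start+i])
	}
	return out
}

func main() {
	out := []byte("ab")
	out = writeCopy(out, 2, 6) // copies "ab" over itself three times
	fmt.Println(string(out))   // abababab
}
```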
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanBytesReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanBytesReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBufioReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bufio.Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanBufioReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
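The `case v < maxNumLit` branch above indexes `decCodeToLen` with `v - 257` to get a base length and an extra-bit count. Those values follow a regular pattern from RFC 1951 §3.2.5: codes 257..264 map directly to lengths 3..10, after which every group of four codes gains one extra bit. A sketch reproducing the table's rule (helper name is hypothetical; code 285 with its fixed length 258 is outside this helper's range):

```go
package main

import "fmt"

// lengthBase returns the base match length and extra-bit count for
// lit/length code v in 257..284, following RFC 1951 section 3.2.5.
func lengthBase(v int) (base, extra int) {
	if v < 265 {
		return v - 257 + 3, 0 // lengths 3..10, no extra bits
	}
	g := (v-265)/4 + 1 // extra bits: one more per group of four codes
	k := (v - 265) % 4 // position within the group
	return 3 + 1<<(g+2) + k<<g, g
}

func main() {
	for _, v := range []int{257, 264, 265, 268, 269, 284} {
		base, extra := lengthBase(v)
		fmt.Println(v, base, extra) // 257->3/0, 264->10/0, 265->11/1, 268->17/1, 269->19/2, 284->227/5
	}
}
```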
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanBufioReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanStringsReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*strings.Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanStringsReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
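When `f.hd == nil` the block uses the fixed distance encoding: five-bit codes sent most-significant-bit first, while the decoder's buffer fills least-significant-bit first. Since `&` and `<<` share precedence in Go, `fb & 0x1F << 3` parses as `(fb & 0x1F) << 3`; shifting the five buffered bits up by three and applying `bits.Reverse8` reverses exactly those five bits, recovering the code number. A standalone check:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Fixed-Huffman distance codes are 5 bits, transmitted MSB first,
	// but the bit buffer accumulates LSB first. Reverse8 on the value
	// shifted into the top 5 bits of a byte undoes the bit order.
	buffered := uint32(0b10110) // bit 0 arrived first on the wire
	code := bits.Reverse8(uint8(buffered&0x1F) << 3)
	fmt.Printf("%05b -> %d\n", buffered, code) // 10110 -> 13 (0b01101)
}
```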
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanStringsReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanGenericReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead) + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + v = int(chunk >> huffmanValueShift) + break + } + } + } + + var length int + switch { + case v < 256: + dict.writeByte(byte(v)) + if dict.availWrite() == 0 { + f.toRead = dict.readFlush() + f.step = (*decompressor).huffmanGenericReader + f.stepState = stateInit + f.b, f.nb = fb, fnb + return + } + goto readLiteral + case v == 256: + f.b, f.nb = fb, fnb + f.finishBlock() + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + case v < maxNumLit: + val := decCodeToLen[(v - 257)] + length = int(val.length) + 3 + n := uint(val.extra) + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits n>0:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + length += int(fb & bitMask32[n]) + fb >>= n & regSizeMaskUint32 + fnb -= n + default: + if debugDecode { + fmt.Println(v, ">= maxNumLit") + } + f.err = CorruptInputError(f.roffset) + f.b, f.nb = fb, fnb + return + } + + var dist uint32 + if f.hd == nil { + for fnb < 5 { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("morebits f.nb<5:", err) + } + f.err = err + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) + fb >>= 5 + fnb -= 5 + } else { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(f.hd.maxRead) + // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, + // but is smart enough to keep local variables in registers, so use nb and b, + // inline call to moreBits and reassign b,nb back to f on return. + for { + for fnb < n { + c, err := fr.ReadByte() + if err != nil { + f.b, f.nb = fb, fnb + f.err = noEOF(err) + return + } + f.roffset++ + fb |= uint32(c) << (fnb & regSizeMaskUint32) + fnb += 8 + } + chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= fnb { + if n == 0 { + f.b, f.nb = fb, fnb + if debugDecode { + fmt.Println("huffsym: n==0") + } + f.err = CorruptInputError(f.roffset) + return + } + fb = fb >> (n & regSizeMaskUint32) + fnb = fnb - n + dist = uint32(chunk >> huffmanValueShift) + break + } + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. 
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = (*decompressor).huffmanGenericReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+func (f *decompressor) huffmanBlockDecoder() func() {
+ switch f.r.(type) {
+ case *bytes.Buffer:
+ return f.huffmanBytesBuffer
+ case *bytes.Reader:
+ return f.huffmanBytesReader
+ case *bufio.Reader:
+ return f.huffmanBufioReader
+ case *strings.Reader:
+ return f.huffmanStringsReader
+ case Reader:
+ return f.huffmanGenericReader
+ default:
+ return f.huffmanGenericReader
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
index 703b9a89aa..c2924df36c 100644
--- a/vendor/github.com/klauspost/compress/flate/level1.go
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -1,241 +1,241 @@
-package flate
-
-import (
- "encoding/binary"
- "fmt"
- "math/bits"
-)
-
-// fastGen maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the generic implementation.
-type fastEncL1 struct {
- fastGen
- table [tableSize]tableEntry
-}
-
-// EncodeL1 uses a similar algorithm to level 1
-func (e *fastEncL1) Encode(dst *tokens, src []byte) {
- const (
- inputMargin = 12 - 1
- minNonLiteralBlockSize = 1 + 1 + inputMargin
- hashBytes = 5
- )
- if debugDeflate && e.cur < 0 {
- panic(fmt.Sprint("e.cur < 0: ", e.cur))
- }
-
- // Protect against e.cur wraparound.
- for e.cur >= bufferReset {
- if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- e.cur = maxMatchOffset
- break
- }
- // Shift down everything in the table that isn't already too far away.
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
- for i := range e.table[:] {
- v := e.table[i].offset
- if v <= minOff {
- v = 0
- } else {
- v = v - e.cur + maxMatchOffset
- }
- e.table[i].offset = v
- }
- e.cur = maxMatchOffset
- }
-
- s := e.addBlock(src)
-
- // This check isn't in the Snappy implementation, but there, the caller
- // instead of the callee handles this case.
- if len(src) < minNonLiteralBlockSize {
- // We do not fill the token table.
- // This will be picked up by caller.
- dst.n = uint16(len(src))
- return
- }
-
- // Override src
- src = e.hist
- nextEmit := s
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := int32(len(src) - inputMargin)
-
- // nextEmit is where in src the next emitLiteral should start from.
- cv := load6432(src, s)
-
- for {
- const skipLog = 5
- const doEvery = 2
-
- nextS := s
- var candidate tableEntry
- for {
- nextHash := hashLen(cv, tableBits, hashBytes)
- candidate = e.table[nextHash]
- nextS = s + doEvery + (s-nextEmit)>>skipLog
- if nextS > sLimit {
- goto emitRemainder
- }
-
- now := load6432(src, nextS)
- e.table[nextHash] = tableEntry{offset: s + e.cur}
- nextHash = hashLen(now, tableBits, hashBytes)
-
- offset := s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
- e.table[nextHash] = tableEntry{offset: nextS + e.cur}
- break
- }
-
- // Do one right away...
- cv = now
- s = nextS
- nextS++
- candidate = e.table[nextHash]
- now >>= 8
- e.table[nextHash] = tableEntry{offset: s + e.cur}
-
- offset = s - (candidate.offset - e.cur)
- if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
- e.table[nextHash] = tableEntry{offset: nextS + e.cur}
- break
- }
- cv = now
- s = nextS
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
-
- // Extend the 4-byte match as long as possible.
- t := candidate.offset - e.cur
- var l = int32(4)
- if false {
- l = e.matchlenLong(s+4, t+4, src) + 4
- } else {
- // inlined:
- a := src[s+4:]
- b := src[t+4:]
- for len(a) >= 8 {
- if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
- l += int32(bits.TrailingZeros64(diff) >> 3)
- break
- }
- l += 8
- a = a[8:]
- b = b[8:]
- }
- if len(a) < 8 {
- b = b[:len(a)]
- for i := range a {
- if a[i] != b[i] {
- break
- }
- l++
- }
- }
- }
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- if false {
- emitLiteral(dst, src[nextEmit:s])
- } else {
- for _, v := range src[nextEmit:s] {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
- }
- }
-
- // Save the match found
- if false {
- dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
- } else {
- // Inlined...
- xoffset := uint32(s - t - baseMatchOffset)
- xlength := l
- oc := offsetCode(xoffset)
- xoffset |= oc << 16
- for xlength > 0 {
- xl := xlength
- if xl > 258 {
- if xl > 258+baseMatchLength {
- xl = 258
- } else {
- xl = 258 - baseMatchLength
- }
- }
- xlength -= xl
- xl -= baseMatchLength
- dst.extraHist[lengthCodes1[uint8(xl)]]++
- dst.offHist[oc]++
- dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
- dst.n++
- }
- }
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
- if s >= sLimit {
- // Index first pair after match end.
- if int(s+l+8) < len(src) {
- cv := load6432(src, s)
- e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
- }
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-2 and at s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
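The comment above motivates deriving several hash-table probes from one 64-bit load. A toy version of the idea: load eight bytes once, then shift to expose the 5-byte windows at s, s+1 and s+2 (the multiplicative constant is borrowed from this package's 5-byte hashing; the helper itself is a simplified stand-in for `hashLen`, not its actual signature):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// hash5 multiplies the low five bytes of u by a large odd constant and
// keeps the top `out` bits as the table index.
func hash5(u uint64, out uint) uint32 {
	const prime5bytes = 889523592379
	return uint32((u << 24) * prime5bytes >> (64 - out))
}

func main() {
	src := []byte("an example input buffer!")
	s := 6
	// One 8-byte load covers the hash windows at s, s+1 and s+2:
	// shifting the loaded word right exposes the next windows without
	// touching memory again.
	x := binary.LittleEndian.Uint64(src[s:])
	fmt.Println(hash5(x, 14), hash5(x>>8, 14), hash5(x>>16, 14))
}
```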
- x := load6432(src, s-2) - o := e.cur + s - 2 - prevHash := hashLen(x, tableBits, hashBytes) - e.table[prevHash] = tableEntry{offset: o} - x >>= 16 - currHash := hashLen(x, tableBits, hashBytes) - candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2} - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { - cv = x >> 8 - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - emitLiteral(dst, src[nextEmit:]) - } -} +package flate + +import ( + "encoding/binary" + "fmt" + "math/bits" +) + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastEncL1 struct { + fastGen + table [tableSize]tableEntry +} + +// EncodeL1 uses a similar algorithm to level 1 +func (e *fastEncL1) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashBytes = 5 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + + for { + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, tableBits, hashBytes) + candidate = e.table[nextHash] + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur} + nextHash = hashLen(now, tableBits, hashBytes) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} + break + } + + // Do one right away... + cv = now + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} + break + } + cv = now + s = nextS + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. 
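The match loop in the hunk below inlines match-length extension: compare eight bytes at a time, and on the first differing word let the XOR's trailing-zero count pinpoint the mismatching byte. A self-contained sketch of the same technique:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen counts the equal leading bytes of a and b, eight at a time;
// the XOR of the first unequal words locates the mismatching byte via
// its trailing-zero count (8 zero bits per equal byte).
func matchLen(a, b []byte) int {
	var n int
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	for i := 0; i < len(a) && i < len(b); i++ {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("abcdefghijXklm"), []byte("abcdefghijYklm"))) // 10
}
```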
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset - e.cur
+ var l = int32(4)
+ if false {
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else {
+ // inlined:
+ a := src[s+4:]
+ b := src[t+4:]
+ for len(a) >= 8 {
+ if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
+ l += int32(bits.TrailingZeros64(diff) >> 3)
+ break
+ }
+ l += 8
+ a = a[8:]
+ b = b[8:]
+ }
+ if len(a) < 8 {
+ b = b[:len(a)]
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ l++
+ }
+ }
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ // Save the match found
+ if false {
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ } else {
+ // Inlined...
+ xoffset := uint32(s - t - baseMatchOffset)
+ xlength := l
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
+ }
+ xlength -= xl
+ xl -= baseMatchLength
+ dst.extraHist[lengthCodes1[uint8(xl)]]++
+ dst.offHist[oc]++
+ dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+ dst.n++
+ }
+ }
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-2)
+ o := e.cur + s - 2
+ prevHash := hashLen(x, tableBits, hashBytes)
+ e.table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hashLen(x, tableBits, hashBytes)
+ candidate = e.table[currHash]
+ e.table[currHash] = tableEntry{offset: o + 2}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
+ cv = x >> 8
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
index 876dfbe305..55e2fd5872 100644
--- a/vendor/github.com/klauspost/compress/flate/level2.go
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -1,214 +1,214 @@
-package flate
-
-import "fmt"
-
-// fastGen maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the generic implementation.
-type fastEncL2 struct {
- fastGen
- table [bTableSize]tableEntry
-}
-
-// EncodeL2 uses a similar algorithm to level 1, but is capable
-// of matching across blocks giving better compression at a small slowdown.
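A single DEFLATE match token carries at most 258 bytes, so the inlined token emission in the level-1 hunk above splits longer matches; the `258 - baseMatchLength` branch shortens a chunk so the remainder never falls below the 3-byte minimum match. A sketch of just the splitting rule (`baseMatchLength` = 3, as in this package; the helper name is ours):

```go
package main

import "fmt"

// splitMatch returns the token lengths used to emit a match of length l,
// each at most 258, never leaving a remainder below the 3-byte minimum.
func splitMatch(l int) []int {
	const baseMatchLength = 3
	var out []int
	for l > 0 {
		xl := l
		if xl > 258 {
			if xl > 258+baseMatchLength {
				xl = 258 // plenty left: take a full-size chunk
			} else {
				xl = 258 - baseMatchLength // keep the remainder >= 3
			}
		}
		l -= xl
		out = append(out, xl)
	}
	return out
}

func main() {
	fmt.Println(splitMatch(600)) // [258 258 84]
	fmt.Println(splitMatch(260)) // [255 5]: a 258+2 split would strand a 2-byte tail
}
```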
-func (e *fastEncL2) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashBytes = 5 - ) - - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - // When should we start skipping if we haven't found matches in a long while. - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, bTableBits, hashBytes) - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - candidate = e.table[nextHash] - now := load6432(src, nextS) - e.table[nextHash] = tableEntry{offset: s + e.cur} - nextHash = hashLen(now, bTableBits, hashBytes) - - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - e.table[nextHash] = tableEntry{offset: nextS + e.cur} - break - } - - // Do one right away... - cv = now - s = nextS - nextS++ - candidate = e.table[nextHash] - now >>= 8 - e.table[nextHash] = tableEntry{offset: s + e.cur} - - offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - break - } - cv = now - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. 
- t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index first pair after match end. - if int(s+l+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // Store every second hash in-between, but offset by 1. - for i := s - l + 2; i < s-5; i += 7 { - x := load6432(src, i) - nextHash := hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i} - // Skip one - x >>= 16 - nextHash = hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i + 2} - // Skip one - x >>= 16 - nextHash = hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i + 4} - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 to s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load6432(src, s-2) - o := e.cur + s - 2 - prevHash := hashLen(x, bTableBits, hashBytes) - prevHash2 := hashLen(x>>8, bTableBits, hashBytes) - e.table[prevHash] = tableEntry{offset: o} - e.table[prevHash2] = tableEntry{offset: o + 1} - currHash := hashLen(x>>16, bTableBits, hashBytes) - candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2} - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { - cv = x >> 24 - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} +package flate + +import "fmt" + +// fastGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type fastEncL2 struct { + fastGen + table [bTableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *fastEncL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. 
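Each encoder level opens with the same `e.cur >= bufferReset` guard seen in the hunk above: before the absolute position counter can overflow, every stored table offset is rebased so that entries still inside the 32 KiB window stay valid and stale ones become zero. A sketch of the rebase rule in isolation (function name is ours):

```go
package main

import "fmt"

// maxMatchOffset mirrors the package's 32 KiB window constant.
const maxMatchOffset = 1 << 15

// rebase maps an absolute hash-table offset into the new coordinate
// system after the position counter is wound back to maxMatchOffset;
// entries already outside the window collapse to zero ("never matches").
func rebase(offset, cur, histLen int32) int32 {
	minOff := cur + histLen - maxMatchOffset
	if offset <= minOff {
		return 0
	}
	return offset - cur + maxMatchOffset
}

func main() {
	cur, histLen := int32(1<<30), int32(4096)
	fmt.Println(rebase(cur+100, cur, histLen))              // recent entry survives, rebased
	fmt.Println(rebase(cur-2*maxMatchOffset, cur, histLen)) // stale entry cleared to 0
}
```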
+ if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + // When should we start skipping if we haven't found matches in a long while. + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, bTableBits, hashBytes) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash] + now := load6432(src, nextS) + e.table[nextHash] = tableEntry{offset: s + e.cur} + nextHash = hashLen(now, bTableBits, hashBytes) + + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + e.table[nextHash] = tableEntry{offset: nextS + e.cur} + break + } + + // Do one right away... + cv = now + s = nextS + nextS++ + candidate = e.table[nextHash] + now >>= 8 + e.table[nextHash] = tableEntry{offset: s + e.cur} + + offset = s - (candidate.offset - e.cur) + if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. + if int(s+l+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every second hash in-between, but offset by 1. 
+ for i := s - l + 2; i < s-5; i += 7 { + x := load6432(src, i) + nextHash := hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 2} + // Skip one + x >>= 16 + nextHash = hashLen(x, bTableBits, hashBytes) + e.table[nextHash] = tableEntry{offset: e.cur + i + 4} + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-2) + o := e.cur + s - 2 + prevHash := hashLen(x, bTableBits, hashBytes) + prevHash2 := hashLen(x>>8, bTableBits, hashBytes) + e.table[prevHash] = tableEntry{offset: o} + e.table[prevHash2] = tableEntry{offset: o + 1} + currHash := hashLen(x>>16, bTableBits, hashBytes) + candidate = e.table[currHash] + e.table[currHash] = tableEntry{offset: o + 2} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { + cv = x >> 24 + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index 7aa2b72a12..353163e5e1 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -1,241 +1,241 @@ -package flate - -import "fmt" - -// fastEncL3 -type fastEncL3 struct { - fastGen - table [1 << 16]tableEntryPrev -} - -// Encode uses a similar algorithm to level 2, will check up to two candidates. -func (e *fastEncL3) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - tableBits = 16 - tableSize = 1 << tableBits - hashBytes = 5 - ) - - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - } - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - e.table[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // Skip if too small. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
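All three levels share the probe-advance heuristic visible in these hunks: level 3 uses `nextS = s + 1 + (s-nextEmit)>>skipLog` with `skipLog = 7`, while levels 1 and 2 use `skipLog = 5` plus a `doEvery` of 2. The longer the encoder goes without finding a match, the more input it skips per probe, so incompressible data is scanned quickly. A tiny illustration:

```go
package main

import "fmt"

// nextProbe mirrors the skipping heuristic: the farther s has moved past
// the last emitted byte without finding a match, the bigger the step.
func nextProbe(s, nextEmit int32, skipLog uint) int32 {
	return s + 1 + (s-nextEmit)>>skipLog
}

func main() {
	const skipLog = 7 // level 3's setting
	s, nextEmit := int32(125), int32(0)
	for i := 0; i < 8; i++ {
		fmt.Print(s, " ")
		s = nextProbe(s, nextEmit, skipLog)
	}
	// prints: 125 126 127 128 130 132 134 136 -- the step grows by one
	// for every further 128 bytes of matchless input.
}
```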
- cv := load6432(src, s) - for { - const skipLog = 7 - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, tableBits, hashBytes) - s = nextS - nextS = s + 1 + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - candidates := e.table[nextHash] - now := load6432(src, nextS) - - // Safe offset distance until s + 4... - minOffset := e.cur + s - (maxMatchOffset - 4) - e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} - - // Check both candidates - candidate = candidates.Cur - if candidate.offset < minOffset { - cv = now - // Previous will also be invalid, we have nothing. - continue - } - - if uint32(cv) == load3232(src, candidate.offset-e.cur) { - if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { - break - } - // Both match and are valid, pick longest. - offset := s - (candidate.offset - e.cur) - o2 := s - (candidates.Prev.offset - e.cur) - l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) - if l2 > l1 { - candidate = candidates.Prev - } - break - } else { - // We only check if value mismatches. - // Offset will always be invalid in other cases. - candidate = candidates.Prev - if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - break - } - } - cv = now - } - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - // - t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - t += l - // Index first pair after match end. - if int(t+8) < len(src) && t > 0 { - cv = load6432(src, t) - nextHash := hashLen(cv, tableBits, hashBytes) - e.table[nextHash] = tableEntryPrev{ - Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + t}, - } - } - goto emitRemainder - } - - // Store every 5th hash in-between. - for i := s - l + 2; i < s-5; i += 6 { - nextHash := hashLen(load6432(src, i), tableBits, hashBytes) - e.table[nextHash] = tableEntryPrev{ - Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + i}} - } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 to s. 
- x := load6432(src, s-2) - prevHash := hashLen(x, tableBits, hashBytes) - - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 2}, - } - x >>= 8 - prevHash = hashLen(x, tableBits, hashBytes) - - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 1}, - } - x >>= 8 - currHash := hashLen(x, tableBits, hashBytes) - candidates := e.table[currHash] - cv = x - e.table[currHash] = tableEntryPrev{ - Prev: candidates.Cur, - Cur: tableEntry{offset: s + e.cur}, - } - - // Check both candidates - candidate = candidates.Cur - minOffset := e.cur + s - (maxMatchOffset - 4) - - if candidate.offset > minOffset { - if uint32(cv) == load3232(src, candidate.offset-e.cur) { - // Found a match... - continue - } - candidate = candidates.Prev - if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - // Match at prev... - continue - } - } - cv = x >> 8 - s++ - break - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} +package flate + +import "fmt" + +// fastEncL3 +type fastEncL3 struct { + fastGen + table [1 << 16]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *fastEncL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + tableBits = 16 + tableSize = 1 << tableBits + hashBytes = 5 + ) + + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + } + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + e.table[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // Skip if too small. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 7 + nextS := s + var candidate tableEntry + for { + nextHash := hashLen(cv, tableBits, hashBytes) + s = nextS + nextS = s + 1 + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash] + now := load6432(src, nextS) + + // Safe offset distance until s + 4... + minOffset := e.cur + s - (maxMatchOffset - 4) + e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} + + // Check both candidates + candidate = candidates.Cur + if candidate.offset < minOffset { + cv = now + // Previous will also be invalid, we have nothing. 
+ continue + } + + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { + break + } + // Both match and are valid, pick longest. + offset := s - (candidate.offset - e.cur) + o2 := s - (candidates.Prev.offset - e.cur) + l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) + if l2 > l1 { + candidate = candidates.Prev + } + break + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + break + } + } + cv = now + } + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + t := candidate.offset - e.cur + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+8) < len(src) && t > 0 { + cv = load6432(src, t) + nextHash := hashLen(cv, tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + t}, + } + } + goto emitRemainder + } + + // Store every 5th hash in-between. + for i := s - l + 2; i < s-5; i += 6 { + nextHash := hashLen(load6432(src, i), tableBits, hashBytes) + e.table[nextHash] = tableEntryPrev{ + Prev: e.table[nextHash].Cur, + Cur: tableEntry{offset: e.cur + i}} + } + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 to s. + x := load6432(src, s-2) + prevHash := hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 2}, + } + x >>= 8 + prevHash = hashLen(x, tableBits, hashBytes) + + e.table[prevHash] = tableEntryPrev{ + Prev: e.table[prevHash].Cur, + Cur: tableEntry{offset: e.cur + s - 1}, + } + x >>= 8 + currHash := hashLen(x, tableBits, hashBytes) + candidates := e.table[currHash] + cv = x + e.table[currHash] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur}, + } + + // Check both candidates + candidate = candidates.Cur + minOffset := e.cur + s - (maxMatchOffset - 4) + + if candidate.offset > minOffset { + if uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Found a match... + continue + } + candidate = candidates.Prev + if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + // Match at prev... 
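// Aside: a scalar sketch of the matchLen comparison above that picks between
// the Cur and Prev candidates — both already match 4 bytes, and the one whose
// tail extends further wins. The library uses optimized matchLen variants;
// this version is illustrative only.
package main

import "fmt"

// matchLen counts how many leading bytes a and b have in common.
func matchLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	src := []byte("abcdXabcdYYabcdYYZ")
	s := 11           // scanning position, src[s:] = "abcdYYZ"
	cur, prev := 0, 5 // candidate offsets; both match the first 4 bytes
	l1 := matchLen(src[s+4:], src[cur+4:])
	l2 := matchLen(src[s+4:], src[prev+4:])
	fmt.Println(l1, l2) // 0 2: the Prev candidate gives the longer match
}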
+ continue + } + } + cv = x >> 8 + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index 23c08b325c..16d545fa2d 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -1,221 +1,221 @@ -package flate - -import "fmt" - -type fastEncL4 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntry -} - -func (e *fastEncL4) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.bTable[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - e.bTable[nextHashL] = entry - - t = lCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { - // We got a long match. Use that. - break - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - lCandidate = e.bTable[hash7(next, tableBits)] - - // If the next long is a candidate, check if we should use that instead... 
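// Aside: a sketch of level 4's dual-table probe above — one table keyed on a
// 4-byte hash, one on a 7-byte hash, with the 7-byte ("long") candidate
// preferred since it is likelier to start a long match. Both hash constants
// below are illustrative assumptions.
package main

import "fmt"

const tableBits = 15

func hash4(v uint64) uint32 {
	return (uint32(v) * 2654435761) >> (32 - tableBits)
}

func hash7(v uint64) uint32 {
	return uint32(((v << 8) * 0x9E3779B185EBCA87) >> (64 - tableBits))
}

func main() {
	var short, long [1 << tableBits]int32 // candidate positions; 0 = empty
	v := uint64(0x0102030405060708)       // 8 bytes at the current position
	short[hash4(v)] = 64
	long[hash7(v)] = 64
	if c := long[hash7(v)]; c != 0 {
		fmt.Println("long candidate at", c) // checked first
	} else if c := short[hash4(v)]; c != 0 {
		fmt.Println("short candidate at", c)
	}
}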
- lOff := nextS - (lCandidate.offset - e.cur) - if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { - l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) - if l2 > l1 { - s = nextS - t = lCandidate.offset - e.cur - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Extend the 4-byte match as long as possible. - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic("s-t") - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index first pair after match end. - if int(s+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} - e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // Store every 3rd hash in-between - if true { - i := nextS - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - e.bTable[hash7(cv, tableBits)] = t - e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - - i += 3 - for ; i < s-1; i += 3 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - e.bTable[hash7(cv, tableBits)] = t - e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - e.bTable[prevHashL] = tableEntry{offset: o} - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} +package flate + +import "fmt" + +type fastEncL4 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntry +} + +func (e *fastEncL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntry{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
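// Aside: the arithmetic of the "shift down" loop below, in isolation — when
// e.cur approaches bufferReset, every stored offset is rebased so the current
// position becomes maxMatchOffset again, and entries older than the 32 KiB
// window are zeroed (offset 0 means "no candidate"). Values are illustrative.
package main

import "fmt"

func main() {
	const maxMatchOffset = 1 << 15 // deflate's 32 KiB window
	cur, histLen := int32(1<<30), int32(4096)
	minOff := cur + histLen - maxMatchOffset
	table := []int32{minOff - 1, minOff + 100} // stored absolute offsets
	for i, v := range table {
		if v <= minOff {
			v = 0 // fell out of the window; can never match again
		} else {
			v = v - cur + maxMatchOffset // rebase near the window start
		}
		table[i] = v
	}
	fmt.Println(table) // [0 4196]; e.cur is then reset to maxMatchOffset
}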
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.bTable[i].offset = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + e.bTable[nextHashL] = entry + + t = lCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { + // We got a long match. Use that. + break + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + lCandidate = e.bTable[hash7(next, tableBits)] + + // If the next long is a candidate, check if we should use that instead... + lOff := nextS - (lCandidate.offset - e.cur) + if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { + l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) + if l2 > l1 { + s = nextS + t = lCandidate.offset - e.cur + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + l := e.matchlenLong(s+4, t+4, src) + 4 + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic("s-t") + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index first pair after match end. 
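// Aside: the skip heuristic above (nextS = s + doEvery + (s-nextEmit)>>skipLog)
// in isolation — the longer the current literal run, the larger the step, so
// poorly compressible input is scanned quickly. Level 4 uses skipLog=6;
// levels 3 and 6 use 7.
package main

import "fmt"

func main() {
	const skipLog = 6
	const doEvery = 1
	nextEmit := int32(0) // start of the pending literal run
	for _, s := range []int32{16, 256, 4096} {
		nextS := s + doEvery + (s-nextEmit)>>skipLog
		fmt.Printf("s=%d: step %d\n", s, nextS-s)
	}
}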
+ if int(s+8) < len(src) { + cv := load6432(src, s) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} + e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} + } + goto emitRemainder + } + + // Store every 3rd hash in-between + if true { + i := nextS + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + + i += 3 + for ; i < s-1; i += 3 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + e.bTable[hash7(cv, tableBits)] = t + e.bTable[hash7(cv>>8, tableBits)] = t2 + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + e.bTable[prevHashL] = tableEntry{offset: o} + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 83ef50ba45..3db7532e89 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -1,310 +1,310 @@ -package flate - -import "fmt" - -type fastEncL5 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL5) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. 
- sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 - lCandidate = e.bTable[nextHashL] - // Store the next match - - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // If the next long is a candidate, use that... - t2 := lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - if l == 0 { - // Extend the 4-byte match as long as possible. - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end of best match... - if sAt := s + l; l < 30 && sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. 
- const skipBeginning = 2 - eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset - t2 := eLong - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - goto emitRemainder - } - - // Store every 3rd hash in-between. - if true { - const hashEvery = 3 - i := s - l + 1 - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // Do an long at i+1 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - eLong = &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // We only have enough bits for a short entry at i+2 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - - // Skip one - otherwise we risk hitting 's' - i += 4 - for ; i < s-1; i += hashEvery { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - eLong := &e.bTable[prevHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} +package flate + +import "fmt" + +type fastEncL5 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL5) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + for { + const skipLog = 6 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... + l = e.matchlen(s+4, t+4, src) + 4 + lCandidate = e.bTable[nextHashL] + // Store the next match + + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // If the next long is a candidate, use that... 
+ t2 := lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + if l == 0 { + // Extend the 4-byte match as long as possible. + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end of best match... + if sAt := s + l; l < 30 && sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset + t2 := eLong - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if t2 >= 0 && off < maxMatchOffset && off > 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if debugDeflate { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + goto emitRemainder + } + + // Store every 3rd hash in-between. + if true { + const hashEvery = 3 + i := s - l + 1 + if i < s-1 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // Do an long at i+1 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + eLong = &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + + // We only have enough bits for a short entry at i+2 + cv >>= 8 + t = tableEntry{offset: t.offset + 1} + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + + // Skip one - otherwise we risk hitting 's' + i += 4 + for ; i < s-1; i += hashEvery { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = t, eLong.Cur + e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 + } + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. 
+ x := load6432(src, s-1) + o := e.cur + s - 1 + prevHashS := hashLen(x, tableBits, hashShortBytes) + prevHashL := hash7(x, tableBits) + e.table[prevHashS] = tableEntry{offset: o} + eLong := &e.bTable[prevHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur + cv = x >> 8 + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index f1e9d98fa5..46420c6882 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -1,325 +1,325 @@ -package flate - -import "fmt" - -type fastEncL6 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL6) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load6432(src, s) - // Repeat MUST be > 1 and within range - repeat := int32(1) - for { - const skipLog = 7 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - // Calculate hashes of 'next' - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Long candidate matches at least 4 bytes. - - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // Check the previous long candidate as well. - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - // Current value did not match, but check if previous long value does. - t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 - - // Look up next long candidate (at nextS) - lCandidate = e.bTable[nextHashL] - - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // Check repeat at s + repOff - const repOff = 1 - t2 := s - repeat + repOff - if load3232(src, t2) == uint32(cv>>(8*repOff)) { - ml := e.matchlen(s+4+repOff, t2+4, src) + 4 - if ml > l { - t = t2 - l = ml - s += repOff - // Not worth checking more. - break - } - } - - // If the next long is a candidate, use that... - t2 = lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - // This is ok, but check previous as well. - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Extend the 4-byte match as long as possible. 
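// Aside: a sketch of the repeat-offset probe above ("Check repeat at s +
// repOff") — the distance of the previous match is retried one byte ahead,
// since the same distance often recurs in repetitive data. The input and
// positions below are illustrative.
package main

import (
	"bytes"
	"fmt"
)

func main() {
	src := []byte("headerAAAABBBBheaderAAAABBBB")
	repeat := 14 // distance of the previously emitted match
	s := 20      // current scan position
	const repOff = 1
	t := s - repeat + repOff
	// 4-byte probe at the repeat distance; on success the encoder takes the
	// repeat match without more hash lookups ("Not worth checking more").
	if bytes.Equal(src[s+repOff:s+repOff+4], src[t:t+4]) {
		fmt.Println("repeat match at distance", repeat)
	}
}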
- if l == 0 { - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end-of-match... - if sAt := s + l; sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 2 - eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] - // Test current - t2 := eLong.Cur.offset - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if off < maxMatchOffset { - if off > 0 && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - // Test next: - t2 = eLong.Prev.offset - e.cur - l + skipBeginning - off := s2 - t2 - if off > 0 && off < maxMatchOffset && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if false { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - repeat = s - t - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index after match end. - for i := nextS + 1; i < int32(len(src))-8; i += 2 { - cv := load6432(src, i) - e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur - } - goto emitRemainder - } - - // Store every long hash in-between and every second short. - if true { - for i := nextS + 1; i < s-1; i += 2 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong2 := &e.bTable[hash7(cv>>8, tableBits)] - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong.Cur, eLong.Prev = t, eLong.Cur - eLong2.Cur, eLong2.Prev = t2, eLong2.Cur - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - cv = load6432(src, s) - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} +package flate + +import "fmt" + +type fastEncL6 struct { + fastGen + table [tableSize]tableEntry + bTable [tableSize]tableEntryPrev +} + +func (e *fastEncL6) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + hashShortBytes = 4 + ) + if debugDeflate && e.cur < 0 { + panic(fmt.Sprint("e.cur < 0: ", e.cur)) + } + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.bTable[:] { + e.bTable[i] = tableEntryPrev{} + } + e.cur = maxMatchOffset + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset + for i := range e.table[:] { + v := e.table[i].offset + if v <= minOff { + v = 0 + } else { + v = v - e.cur + maxMatchOffset + } + e.table[i].offset = v + } + for i := range e.bTable[:] { + v := e.bTable[i] + if v.Cur.offset <= minOff { + v.Cur.offset = 0 + v.Prev.offset = 0 + } else { + v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset + if v.Prev.offset <= minOff { + v.Prev.offset = 0 + } else { + v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset + } + } + e.bTable[i] = v + } + e.cur = maxMatchOffset + } + + s := e.addBlock(src) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Override src + src = e.hist + nextEmit := s + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load6432(src, s) + // Repeat MUST be > 1 and within range + repeat := int32(1) + for { + const skipLog = 7 + const doEvery = 1 + + nextS := s + var l int32 + var t int32 + for { + nextHashS := hashLen(cv, tableBits, hashShortBytes) + nextHashL := hash7(cv, tableBits) + s = nextS + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit { + goto emitRemainder + } + // Fetch a short+long candidate + sCandidate := e.table[nextHashS] + lCandidate := e.bTable[nextHashL] + next := load6432(src, nextS) + entry := tableEntry{offset: s + e.cur} + e.table[nextHashS] = entry + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = entry, eLong.Cur + + // Calculate hashes of 'next' + nextHashS = hashLen(next, tableBits, hashShortBytes) + nextHashL = hash7(next, tableBits) + + t = lCandidate.Cur.offset - e.cur + if s-t < maxMatchOffset { + if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + // Long candidate matches at least 4 bytes. + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check the previous long candidate as well. + t2 := lCandidate.Prev.offset - e.cur + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + l = e.matchlen(s+4, t+4, src) + 4 + ml1 := e.matchlen(s+4, t2+4, src) + 4 + if ml1 > l { + t = t2 + l = ml1 + break + } + } + break + } + // Current value did not match, but check if previous long value does. + t = lCandidate.Prev.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + break + } + } + + t = sCandidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + // Found a 4 match... 
+ l = e.matchlen(s+4, t+4, src) + 4 + + // Look up next long candidate (at nextS) + lCandidate = e.bTable[nextHashL] + + // Store the next match + e.table[nextHashS] = tableEntry{offset: nextS + e.cur} + eLong := &e.bTable[nextHashL] + eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur + + // Check repeat at s + repOff + const repOff = 1 + t2 := s - repeat + repOff + if load3232(src, t2) == uint32(cv>>(8*repOff)) { + ml := e.matchlen(s+4+repOff, t2+4, src) + 4 + if ml > l { + t = t2 + l = ml + s += repOff + // Not worth checking more. + break + } + } + + // If the next long is a candidate, use that... + t2 = lCandidate.Cur.offset - e.cur + if nextS-t2 < maxMatchOffset { + if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + // This is ok, but check previous as well. + } + } + // If the previous long is a candidate, use that... + t2 = lCandidate.Prev.offset - e.cur + if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + ml := e.matchlen(nextS+4, t2+4, src) + 4 + if ml > l { + t = t2 + s = nextS + l = ml + break + } + } + } + break + } + cv = next + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + + // Extend the 4-byte match as long as possible. + if l == 0 { + l = e.matchlenLong(s+4, t+4, src) + 4 + } else if l == maxMatchLength { + l += e.matchlenLong(s+l, t+l, src) + } + + // Try to locate a better match by checking the end-of-match... + if sAt := s + l; sAt < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is 2/3 bytes depending on input. + // 3 is only a little better when it is but sometimes a lot worse. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 2 + eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] + // Test current + t2 := eLong.Cur.offset - e.cur - l + skipBeginning + s2 := s + skipBeginning + off := s2 - t2 + if off < maxMatchOffset { + if off > 0 && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + // Test next: + t2 = eLong.Prev.offset - e.cur - l + skipBeginning + off := s2 - t2 + if off > 0 && off < maxMatchOffset && t2 >= 0 { + if l2 := e.matchlenLong(s2, t2, src); l2 > l { + t = t2 + l = l2 + s = s2 + } + } + } + } + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + if false { + if t >= s { + panic(fmt.Sprintln("s-t", s, t)) + } + if (s - t) > maxMatchOffset { + panic(fmt.Sprintln("mmo", s-t)) + } + if l < baseMatchLength { + panic("bml") + } + } + + dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) + repeat = s - t + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + + if s >= sLimit { + // Index after match end. + for i := nextS + 1; i < int32(len(src))-8; i += 2 { + cv := load6432(src, i) + e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur + } + goto emitRemainder + } + + // Store every long hash in-between and every second short. 
+ if true { + for i := nextS + 1; i < s-1; i += 2 { + cv := load6432(src, i) + t := tableEntry{offset: i + e.cur} + t2 := tableEntry{offset: t.offset + 1} + eLong := &e.bTable[hash7(cv, tableBits)] + eLong2 := &e.bTable[hash7(cv>>8, tableBits)] + e.table[hashLen(cv, tableBits, hashShortBytes)] = t + eLong.Cur, eLong.Prev = t, eLong.Cur + eLong2.Cur, eLong2.Prev = t2, eLong2.Cur + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + cv = load6432(src, s) + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go index 6ed28061b2..b82ead7d17 100644 --- a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go +++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go @@ -1,37 +1,37 @@ -package flate - -const ( - // Masks for shifts with register sizes of the shift value. - // This can be used to work around the x86 design of shifting by mod register size. - // It can be used when a variable shift is always smaller than the register size. - - // reg8SizeMaskX - shift value is 8 bits, shifted is X - reg8SizeMask8 = 7 - reg8SizeMask16 = 15 - reg8SizeMask32 = 31 - reg8SizeMask64 = 63 - - // reg16SizeMaskX - shift value is 16 bits, shifted is X - reg16SizeMask8 = reg8SizeMask8 - reg16SizeMask16 = reg8SizeMask16 - reg16SizeMask32 = reg8SizeMask32 - reg16SizeMask64 = reg8SizeMask64 - - // reg32SizeMaskX - shift value is 32 bits, shifted is X - reg32SizeMask8 = reg8SizeMask8 - reg32SizeMask16 = reg8SizeMask16 - reg32SizeMask32 = reg8SizeMask32 - reg32SizeMask64 = reg8SizeMask64 - - // reg64SizeMaskX - shift value is 64 bits, shifted is X - reg64SizeMask8 = reg8SizeMask8 - reg64SizeMask16 = reg8SizeMask16 - reg64SizeMask32 = reg8SizeMask32 - reg64SizeMask64 = reg8SizeMask64 - - // regSizeMaskUintX - shift value is uint, shifted is X - regSizeMaskUint8 = reg8SizeMask8 - regSizeMaskUint16 = reg8SizeMask16 - regSizeMaskUint32 = reg8SizeMask32 - regSizeMaskUint64 = reg8SizeMask64 -) +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. 
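// Aside: how these masks are meant to be used — on amd64 the hardware already
// reduces shift counts mod the register width, so "& 63" costs nothing and
// lets the compiler drop its own "shift too large" guard; on other
// architectures (regmask_other.go, the next file) the masks are all-ones
// no-ops, keeping the shared code portable.
package main

import "fmt"

const regSizeMaskUint64 = 63 // amd64 value; ^uint(0) on other platforms

func main() {
	v := uint64(0xFF00)
	for _, shift := range []uint{4, 8, 12} {
		fmt.Printf("%#x\n", v>>(shift&regSizeMaskUint64))
	}
}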
+ + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 7 + reg8SizeMask16 = 15 + reg8SizeMask32 = 31 + reg8SizeMask64 = 63 + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = reg8SizeMask8 + reg16SizeMask16 = reg8SizeMask16 + reg16SizeMask32 = reg8SizeMask32 + reg16SizeMask64 = reg8SizeMask64 + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = reg8SizeMask8 + reg32SizeMask16 = reg8SizeMask16 + reg32SizeMask32 = reg8SizeMask32 + reg32SizeMask64 = reg8SizeMask64 + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = reg8SizeMask8 + reg64SizeMask16 = reg8SizeMask16 + reg64SizeMask32 = reg8SizeMask32 + reg64SizeMask64 = reg8SizeMask64 + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = reg8SizeMask8 + regSizeMaskUint16 = reg8SizeMask16 + regSizeMaskUint32 = reg8SizeMask32 + regSizeMaskUint64 = reg8SizeMask64 +) diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go index 1b7a2cbd79..d20d14da1b 100644 --- a/vendor/github.com/klauspost/compress/flate/regmask_other.go +++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go @@ -1,40 +1,40 @@ -//go:build !amd64 -// +build !amd64 - -package flate - -const ( - // Masks for shifts with register sizes of the shift value. - // This can be used to work around the x86 design of shifting by mod register size. - // It can be used when a variable shift is always smaller than the register size. - - // reg8SizeMaskX - shift value is 8 bits, shifted is X - reg8SizeMask8 = 0xff - reg8SizeMask16 = 0xff - reg8SizeMask32 = 0xff - reg8SizeMask64 = 0xff - - // reg16SizeMaskX - shift value is 16 bits, shifted is X - reg16SizeMask8 = 0xffff - reg16SizeMask16 = 0xffff - reg16SizeMask32 = 0xffff - reg16SizeMask64 = 0xffff - - // reg32SizeMaskX - shift value is 32 bits, shifted is X - reg32SizeMask8 = 0xffffffff - reg32SizeMask16 = 0xffffffff - reg32SizeMask32 = 0xffffffff - reg32SizeMask64 = 0xffffffff - - // reg64SizeMaskX - shift value is 64 bits, shifted is X - reg64SizeMask8 = 0xffffffffffffffff - reg64SizeMask16 = 0xffffffffffffffff - reg64SizeMask32 = 0xffffffffffffffff - reg64SizeMask64 = 0xffffffffffffffff - - // regSizeMaskUintX - shift value is uint, shifted is X - regSizeMaskUint8 = ^uint(0) - regSizeMaskUint16 = ^uint(0) - regSizeMaskUint32 = ^uint(0) - regSizeMaskUint64 = ^uint(0) -) +//go:build !amd64 +// +build !amd64 + +package flate + +const ( + // Masks for shifts with register sizes of the shift value. + // This can be used to work around the x86 design of shifting by mod register size. + // It can be used when a variable shift is always smaller than the register size. 
+ + // reg8SizeMaskX - shift value is 8 bits, shifted is X + reg8SizeMask8 = 0xff + reg8SizeMask16 = 0xff + reg8SizeMask32 = 0xff + reg8SizeMask64 = 0xff + + // reg16SizeMaskX - shift value is 16 bits, shifted is X + reg16SizeMask8 = 0xffff + reg16SizeMask16 = 0xffff + reg16SizeMask32 = 0xffff + reg16SizeMask64 = 0xffff + + // reg32SizeMaskX - shift value is 32 bits, shifted is X + reg32SizeMask8 = 0xffffffff + reg32SizeMask16 = 0xffffffff + reg32SizeMask32 = 0xffffffff + reg32SizeMask64 = 0xffffffff + + // reg64SizeMaskX - shift value is 64 bits, shifted is X + reg64SizeMask8 = 0xffffffffffffffff + reg64SizeMask16 = 0xffffffffffffffff + reg64SizeMask32 = 0xffffffffffffffff + reg64SizeMask64 = 0xffffffffffffffff + + // regSizeMaskUintX - shift value is uint, shifted is X + regSizeMaskUint8 = ^uint(0) + regSizeMaskUint16 = ^uint(0) + regSizeMaskUint32 = ^uint(0) + regSizeMaskUint64 = ^uint(0) +) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index f3d4139ef3..e9eef7c654 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -1,318 +1,318 @@ -package flate - -import ( - "io" - "math" - "sync" -) - -const ( - maxStatelessBlock = math.MaxInt16 - // dictionary will be taken from maxStatelessBlock, so limit it. - maxStatelessDict = 8 << 10 - - slTableBits = 13 - slTableSize = 1 << slTableBits - slTableShift = 32 - slTableBits -) - -type statelessWriter struct { - dst io.Writer - closed bool -} - -func (s *statelessWriter) Close() error { - if s.closed { - return nil - } - s.closed = true - // Emit EOF block - return StatelessDeflate(s.dst, nil, true, nil) -} - -func (s *statelessWriter) Write(p []byte) (n int, err error) { - err = StatelessDeflate(s.dst, p, false, nil) - if err != nil { - return 0, err - } - return len(p), nil -} - -func (s *statelessWriter) Reset(w io.Writer) { - s.dst = w - s.closed = false -} - -// NewStatelessWriter will do compression but without maintaining any state -// between Write calls. -// There will be no memory kept between Write calls, -// but compression and speed will be suboptimal. -// Because of this, the size of actual Write calls will affect output size. -func NewStatelessWriter(dst io.Writer) io.WriteCloser { - return &statelessWriter{dst: dst} -} - -// bitWriterPool contains bit writers that can be reused. -var bitWriterPool = sync.Pool{ - New: func() interface{} { - return newHuffmanBitWriter(nil) - }, -} - -// StatelessDeflate allows compressing directly to a Writer without retaining state. -// When returning everything will be flushed. -// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. -// Longer dictionaries will be truncated and will still produce valid output. -// Sending nil dictionary is perfectly fine. -func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { - var dst tokens - bw := bitWriterPool.Get().(*huffmanBitWriter) - bw.reset(out) - defer func() { - // don't keep a reference to our output - bw.reset(nil) - bitWriterPool.Put(bw) - }() - if eof && len(in) == 0 { - // Just write an EOF block. - // Could be faster... - bw.writeStoredHeader(0, true) - bw.flush() - return bw.err - } - - // Truncate dict - if len(dict) > maxStatelessDict { - dict = dict[len(dict)-maxStatelessDict:] - } - - // For subsequent loops, keep shallow dict reference to avoid alloc+copy. 
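// Aside: a usage sketch for StatelessDeflate above — every call is
// self-contained, and an optional dictionary (truncated to its last 8 KiB,
// maxStatelessDict) stands in for data presumed to precede the block.
// The inputs below are illustrative.
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	dict := []byte("common preamble shared by many messages")
	payload := []byte("common preamble shared by many messages: new data")
	// eof=true emits the final block, so buf holds a complete stream.
	// (A decoder would need the same dictionary, e.g. flate.NewReaderDict.)
	if err := flate.StatelessDeflate(&buf, payload, true, dict); err != nil {
		panic(err)
	}
	fmt.Println("compressed to", buf.Len(), "bytes")
}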
- var inDict []byte - - for len(in) > 0 { - todo := in - if len(inDict) > 0 { - if len(todo) > maxStatelessBlock-maxStatelessDict { - todo = todo[:maxStatelessBlock-maxStatelessDict] - } - } else if len(todo) > maxStatelessBlock-len(dict) { - todo = todo[:maxStatelessBlock-len(dict)] - } - inOrg := in - in = in[len(todo):] - uncompressed := todo - if len(dict) > 0 { - // combine dict and source - bufLen := len(todo) + len(dict) - combined := make([]byte, bufLen) - copy(combined, dict) - copy(combined[len(dict):], todo) - todo = combined - } - // Compress - if len(inDict) == 0 { - statelessEnc(&dst, todo, int16(len(dict))) - } else { - statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) - } - isEof := eof && len(in) == 0 - - if dst.n == 0 { - bw.writeStoredHeader(len(uncompressed), isEof) - if bw.err != nil { - return bw.err - } - bw.writeBytes(uncompressed) - } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { - // If we removed less than 1/16th, huffman compress the block. - bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) - } else { - bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) - } - if len(in) > 0 { - // Retain a dict if we have more - inDict = inOrg[len(uncompressed)-maxStatelessDict:] - dict = nil - dst.Reset() - } - if bw.err != nil { - return bw.err - } - } - if !eof { - // Align, only a stored block can do that. - bw.writeStoredHeader(0, false) - } - bw.flush() - return bw.err -} - -func hashSL(u uint32) uint32 { - return (u * 0x1e35a7bd) >> slTableShift -} - -func load3216(b []byte, i int16) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load6416(b []byte, i int16) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func statelessEnc(dst *tokens, src []byte, startAt int16) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - type tableEntry struct { - offset int16 - } - - var table [slTableSize]tableEntry - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src)-int(startAt) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = 0 - return - } - // Index until startAt - if startAt > 0 { - cv := load3232(src, 0) - for i := int16(0); i < startAt; i++ { - table[hashSL(cv)] = tableEntry{offset: i} - cv = (cv >> 8) | (uint32(src[i+4]) << 24) - } - } - - s := startAt + 1 - nextEmit := startAt - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int16(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load3216(src, s) - - for { - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashSL(cv) - candidate = table[nextHash] - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit || nextS <= 0 { - goto emitRemainder - } - - now := load6416(src, nextS) - table[nextHash] = tableEntry{offset: s} - nextHash = hashSL(uint32(now)) - - if cv == load3216(src, candidate.offset) { - table[nextHash] = tableEntry{offset: nextS} - break - } - - // Do one right away... - cv = uint32(now) - s = nextS - nextS++ - candidate = table[nextHash] - now >>= 8 - table[nextHash] = tableEntry{offset: s} - - if cv == load3216(src, candidate.offset) { - table[nextHash] = tableEntry{offset: nextS} - break - } - cv = uint32(now) - s = nextS - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - t := candidate.offset - l := int16(matchLen(src[s+4:], src[t+4:]) + 4) - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - // Save the match found - dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load6416(src, s-2) - o := s - 2 - prevHash := hashSL(uint32(x)) - table[prevHash] = tableEntry{offset: o} - x >>= 16 - currHash := hashSL(uint32(x)) - candidate = table[currHash] - table[currHash] = tableEntry{offset: o + 2} - - if uint32(x) != load3216(src, candidate.offset) { - cv = uint32(x >> 8) - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - emitLiteral(dst, src[nextEmit:]) - } -} +package flate + +import ( + "io" + "math" + "sync" +) + +const ( + maxStatelessBlock = math.MaxInt16 + // dictionary will be taken from maxStatelessBlock, so limit it. + maxStatelessDict = 8 << 10 + + slTableBits = 13 + slTableSize = 1 << slTableBits + slTableShift = 32 - slTableBits +) + +type statelessWriter struct { + dst io.Writer + closed bool +} + +func (s *statelessWriter) Close() error { + if s.closed { + return nil + } + s.closed = true + // Emit EOF block + return StatelessDeflate(s.dst, nil, true, nil) +} + +func (s *statelessWriter) Write(p []byte) (n int, err error) { + err = StatelessDeflate(s.dst, p, false, nil) + if err != nil { + return 0, err + } + return len(p), nil +} + +func (s *statelessWriter) Reset(w io.Writer) { + s.dst = w + s.closed = false +} + +// NewStatelessWriter will do compression but without maintaining any state +// between Write calls. +// There will be no memory kept between Write calls, +// but compression and speed will be suboptimal. 
+// Because of this, the size of actual Write calls will affect output size. +func NewStatelessWriter(dst io.Writer) io.WriteCloser { + return &statelessWriter{dst: dst} +} + +// bitWriterPool contains bit writers that can be reused. +var bitWriterPool = sync.Pool{ + New: func() interface{} { + return newHuffmanBitWriter(nil) + }, +} + +// StatelessDeflate allows compressing directly to a Writer without retaining state. +// When returning everything will be flushed. +// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. +// Longer dictionaries will be truncated and will still produce valid output. +// Sending nil dictionary is perfectly fine. +func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { + var dst tokens + bw := bitWriterPool.Get().(*huffmanBitWriter) + bw.reset(out) + defer func() { + // don't keep a reference to our output + bw.reset(nil) + bitWriterPool.Put(bw) + }() + if eof && len(in) == 0 { + // Just write an EOF block. + // Could be faster... + bw.writeStoredHeader(0, true) + bw.flush() + return bw.err + } + + // Truncate dict + if len(dict) > maxStatelessDict { + dict = dict[len(dict)-maxStatelessDict:] + } + + // For subsequent loops, keep shallow dict reference to avoid alloc+copy. + var inDict []byte + + for len(in) > 0 { + todo := in + if len(inDict) > 0 { + if len(todo) > maxStatelessBlock-maxStatelessDict { + todo = todo[:maxStatelessBlock-maxStatelessDict] + } + } else if len(todo) > maxStatelessBlock-len(dict) { + todo = todo[:maxStatelessBlock-len(dict)] + } + inOrg := in + in = in[len(todo):] + uncompressed := todo + if len(dict) > 0 { + // combine dict and source + bufLen := len(todo) + len(dict) + combined := make([]byte, bufLen) + copy(combined, dict) + copy(combined[len(dict):], todo) + todo = combined + } + // Compress + if len(inDict) == 0 { + statelessEnc(&dst, todo, int16(len(dict))) + } else { + statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + } + isEof := eof && len(in) == 0 + + if dst.n == 0 { + bw.writeStoredHeader(len(uncompressed), isEof) + if bw.err != nil { + return bw.err + } + bw.writeBytes(uncompressed) + } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { + // If we removed less than 1/16th, huffman compress the block. + bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) + } else { + bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) + } + if len(in) > 0 { + // Retain a dict if we have more + inDict = inOrg[len(uncompressed)-maxStatelessDict:] + dict = nil + dst.Reset() + } + if bw.err != nil { + return bw.err + } + } + if !eof { + // Align, only a stored block can do that. + bw.writeStoredHeader(0, false) + } + bw.flush() + return bw.err +} + +func hashSL(u uint32) uint32 { + return (u * 0x1e35a7bd) >> slTableShift +} + +func load3216(b []byte, i int16) uint32 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. + b = b[i:] + b = b[:4] + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6416(b []byte, i int16) uint64 { + // Help the compiler eliminate bounds checks on the read so it can be done in a single read. 
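For orientation, a minimal usage sketch of the stateless API re-added above, assuming the package were imported at its upstream path rather than through vendor/:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	w := flate.NewStatelessWriter(&buf)

	// No state survives between Writes, so one large Write compresses
	// better than many small ones; each block holds at most ~32KiB of
	// input (maxStatelessBlock) plus up to 8KiB of carried dictionary.
	if _, err := w.Write(bytes.Repeat([]byte("notely "), 512)); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // emits the final EOF block
		panic(err)
	}
	fmt.Printf("3584 bytes in, %d bytes out\n", buf.Len())
}
```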
+ b = b[i:] + b = b[:8] + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func statelessEnc(dst *tokens, src []byte, startAt int16) { + const ( + inputMargin = 12 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + type tableEntry struct { + offset int16 + } + + var table [slTableSize]tableEntry + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src)-int(startAt) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = 0 + return + } + // Index until startAt + if startAt > 0 { + cv := load3232(src, 0) + for i := int16(0); i < startAt; i++ { + table[hashSL(cv)] = tableEntry{offset: i} + cv = (cv >> 8) | (uint32(src[i+4]) << 24) + } + } + + s := startAt + 1 + nextEmit := startAt + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int16(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + cv := load3216(src, s) + + for { + const skipLog = 5 + const doEvery = 2 + + nextS := s + var candidate tableEntry + for { + nextHash := hashSL(cv) + candidate = table[nextHash] + nextS = s + doEvery + (s-nextEmit)>>skipLog + if nextS > sLimit || nextS <= 0 { + goto emitRemainder + } + + now := load6416(src, nextS) + table[nextHash] = tableEntry{offset: s} + nextHash = hashSL(uint32(now)) + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + + // Do one right away... + cv = uint32(now) + s = nextS + nextS++ + candidate = table[nextHash] + now >>= 8 + table[nextHash] = tableEntry{offset: s} + + if cv == load3216(src, candidate.offset) { + table[nextHash] = tableEntry{offset: nextS} + break + } + cv = uint32(now) + s = nextS + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + t := candidate.offset + l := int16(matchLen(src[s+4:], src[t+4:]) + 4) + + // Extend backwards + for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + if nextEmit < s { + if false { + emitLiteral(dst, src[nextEmit:s]) + } else { + for _, v := range src[nextEmit:s] { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } + } + } + + // Save the match found + dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset)) + s += l + nextEmit = s + if nextS >= s { + s = nextS + 1 + } + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-2 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
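load3216 and load6416 above reslice twice (b = b[i:]; b = b[:8]) so the compiler can prove every byte read in bounds and fuse them into a single load. A sketch of the same effect via encoding/binary, which the compiler recognizes the same way:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// load64 reads 8 little-endian bytes starting at i. Like the reslice
// pattern in load6416, this compiles down to one bounds check plus a
// single 8-byte load on little-endian targets.
func load64(b []byte, i int) uint64 {
	return binary.LittleEndian.Uint64(b[i:])
}

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
	fmt.Printf("%#x\n", load64(b, 0)) // 0x807060504030201
}
```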
+ x := load6416(src, s-2) + o := s - 2 + prevHash := hashSL(uint32(x)) + table[prevHash] = tableEntry{offset: o} + x >>= 16 + currHash := hashSL(uint32(x)) + candidate = table[currHash] + table[currHash] = tableEntry{offset: o + 2} + + if uint32(x) != load3216(src, candidate.offset) { + cv = uint32(x >> 8) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + // If nothing was added, don't encode literals. + if dst.n == 0 { + return + } + emitLiteral(dst, src[nextEmit:]) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go index d818790c13..3f7bf4f6f5 100644 --- a/vendor/github.com/klauspost/compress/flate/token.go +++ b/vendor/github.com/klauspost/compress/flate/token.go @@ -1,379 +1,379 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits - // bits 16-22 offsetcode - 5 bits - // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits - // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits - lengthShift = 22 - offsetMask = 1<maxnumlit - offHist [32]uint16 // offset codes - litHist [256]uint16 // codes 0->255 - nFilled int - n uint16 // Must be able to contain maxStoreBlockSize - tokens [maxStoreBlockSize + 1]token -} - -func (t *tokens) Reset() { - if t.n == 0 { - return - } - t.n = 0 - t.nFilled = 0 - for i := range t.litHist[:] { - t.litHist[i] = 0 - } - for i := range t.extraHist[:] { - t.extraHist[i] = 0 - } - for i := range t.offHist[:] { - t.offHist[i] = 0 - } -} - -func (t *tokens) Fill() { - if t.n == 0 { - return - } - for i, v := range t.litHist[:] { - if v == 0 { - t.litHist[i] = 1 - t.nFilled++ - } - } - for i, v := range t.extraHist[:literalCount-256] { - if v == 0 { - t.nFilled++ - t.extraHist[i] = 1 - } - } - for i, v := range t.offHist[:offsetCodeCount] { - if v == 0 { - t.offHist[i] = 1 - } - } -} - -func indexTokens(in []token) tokens { - var t tokens - t.indexTokens(in) - return t -} - -func (t *tokens) indexTokens(in []token) { - t.Reset() - for _, tok := range in { - if tok < matchType { - t.AddLiteral(tok.literal()) - continue - } - t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask) - } -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -func emitLiteral(dst *tokens, lit []byte) { - for _, v := range lit { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } -} - -func (t *tokens) AddLiteral(lit byte) { - t.tokens[t.n] = token(lit) - t.litHist[lit]++ - t.n++ -} - -// from https://stackoverflow.com/a/28730362 -func mFastLog2(val float32) float32 { - ux := int32(math.Float32bits(val)) - log2 := (float32)(((ux >> 23) & 255) - 128) - ux &= -0x7f800001 - ux += 127 << 23 - uval := math.Float32frombits(uint32(ux)) - log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759 - return log2 -} - -// EstimatedBits will return an minimum size estimated by an *optimal* -// compression of the block. 
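statelessEnc, which closes out above, speeds up over incompressible input: the next probe position is s + doEvery + (s-nextEmit)>>skipLog, so the stride grows by one for every 32 consecutive unmatched bytes. A tiny worked sketch of that schedule:

```go
package main

import "fmt"

// Prints statelessEnc's probe stride (doEvery=2, skipLog=5) for a few
// distances since the last emitted literal run: 2 below 32 unmatched
// bytes, 3 from 32, 4 from 64, and so on.
func main() {
	const skipLog, doEvery = 5, 2
	nextEmit := 0
	for _, s := range []int{1, 16, 32, 64, 128, 256} {
		step := doEvery + (s-nextEmit)>>skipLog
		fmt.Printf("s=%3d step=%d next=%d\n", s, step, s+step)
	}
}
```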
-// The size of the block -func (t *tokens) EstimatedBits() int { - shannon := float32(0) - bits := int(0) - nMatches := 0 - total := int(t.n) + t.nFilled - if total > 0 { - invTotal := 1.0 / float32(total) - for _, v := range t.litHist[:] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - } - } - // Just add 15 for EOB - shannon += 15 - for i, v := range t.extraHist[1 : literalCount-256] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - bits += int(lengthExtraBits[i&31]) * int(v) - nMatches += int(v) - } - } - } - if nMatches > 0 { - invTotal := 1.0 / float32(nMatches) - for i, v := range t.offHist[:offsetCodeCount] { - if v > 0 { - n := float32(v) - shannon += atLeastOne(-mFastLog2(n*invTotal)) * n - bits += int(offsetExtraBits[i&31]) * int(v) - } - } - } - return int(shannon) + bits -} - -// AddMatch adds a match to the tokens. -// This function is very sensitive to inlining and right on the border. -func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { - if debugDeflate { - if xlength >= maxMatchLength+baseMatchLength { - panic(fmt.Errorf("invalid length: %v", xlength)) - } - if xoffset >= maxMatchOffset+baseMatchOffset { - panic(fmt.Errorf("invalid offset: %v", xoffset)) - } - } - oCode := offsetCode(xoffset) - xoffset |= oCode << 16 - - t.extraHist[lengthCodes1[uint8(xlength)]]++ - t.offHist[oCode&31]++ - t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset { - panic(fmt.Errorf("invalid offset: %v", xoffset)) - } - } - oc := offsetCode(xoffset) - xoffset |= oc << 16 - for xlength > 0 { - xl := xlength - if xl > 258 { - // We need to have at least baseMatchLength left over for next loop. - if xl > 258+baseMatchLength { - xl = 258 - } else { - xl = 258 - baseMatchLength - } - } - xlength -= xl - xl -= baseMatchLength - t.extraHist[lengthCodes1[uint8(xl)]]++ - t.offHist[oc&31]++ - t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } - -// Convert length to code. -func lengthCode(len uint8) uint8 { return lengthCodes[len] } - -// Returns the offset code corresponding to a specific offset -func offsetCode(off uint32) uint32 { - if false { - if off < uint32(len(offsetCodes)) { - return offsetCodes[off&255] - } else if off>>7 < uint32(len(offsetCodes)) { - return offsetCodes[(off>>7)&255] + 14 - } else { - return offsetCodes[(off>>14)&255] + 28 - } - } - if off < uint32(len(offsetCodes)) { - return offsetCodes[uint8(off)] - } - return offsetCodes14[uint8(off>>7)] -} +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits + // bits 16-22 offsetcode - 5 bits + // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits + // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits + lengthShift = 22 + offsetMask = 1<maxnumlit + offHist [32]uint16 // offset codes + litHist [256]uint16 // codes 0->255 + nFilled int + n uint16 // Must be able to contain maxStoreBlockSize + tokens [maxStoreBlockSize + 1]token +} + +func (t *tokens) Reset() { + if t.n == 0 { + return + } + t.n = 0 + t.nFilled = 0 + for i := range t.litHist[:] { + t.litHist[i] = 0 + } + for i := range t.extraHist[:] { + t.extraHist[i] = 0 + } + for i := range t.offHist[:] { + t.offHist[i] = 0 + } +} + +func (t *tokens) Fill() { + if t.n == 0 { + return + } + for i, v := range t.litHist[:] { + if v == 0 { + t.litHist[i] = 1 + t.nFilled++ + } + } + for i, v := range t.extraHist[:literalCount-256] { + if v == 0 { + t.nFilled++ + t.extraHist[i] = 1 + } + } + for i, v := range t.offHist[:offsetCodeCount] { + if v == 0 { + t.offHist[i] = 1 + } + } +} + +func indexTokens(in []token) tokens { + var t tokens + t.indexTokens(in) + return t +} + +func (t *tokens) indexTokens(in []token) { + t.Reset() + for _, tok := range in { + if tok < matchType { + t.AddLiteral(tok.literal()) + continue + } + t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask) + } +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst *tokens, lit []byte) { + for _, v := range lit { + dst.tokens[dst.n] = token(v) + dst.litHist[v]++ + dst.n++ + } +} + +func (t *tokens) AddLiteral(lit byte) { + t.tokens[t.n] = token(lit) + t.litHist[lit]++ + t.n++ +} + +// from https://stackoverflow.com/a/28730362 +func mFastLog2(val float32) float32 { + ux := int32(math.Float32bits(val)) + log2 := (float32)(((ux >> 23) & 255) - 128) + ux &= -0x7f800001 + ux += 127 << 23 + uval := math.Float32frombits(uint32(ux)) + log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759 + return log2 +} + +// EstimatedBits will return an minimum size estimated by an *optimal* +// compression of the block. +// The size of the block +func (t *tokens) EstimatedBits() int { + shannon := float32(0) + bits := int(0) + nMatches := 0 + total := int(t.n) + t.nFilled + if total > 0 { + invTotal := 1.0 / float32(total) + for _, v := range t.litHist[:] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + } + } + // Just add 15 for EOB + shannon += 15 + for i, v := range t.extraHist[1 : literalCount-256] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + bits += int(lengthExtraBits[i&31]) * int(v) + nMatches += int(v) + } + } + } + if nMatches > 0 { + invTotal := 1.0 / float32(nMatches) + for i, v := range t.offHist[:offsetCodeCount] { + if v > 0 { + n := float32(v) + shannon += atLeastOne(-mFastLog2(n*invTotal)) * n + bits += int(offsetExtraBits[i&31]) * int(v) + } + } + } + return int(shannon) + bits +} + +// AddMatch adds a match to the tokens. +// This function is very sensitive to inlining and right on the border. 
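EstimatedBits above is a Shannon-entropy estimate: each symbol occurring v times out of total contributes v * -log2(v/total) bits, computed with the fast approximate mFastLog2 and topped up with the per-code extra bits. A standalone sketch of the core sum, using math.Log2 for clarity:

```go
package main

import (
	"fmt"
	"math"
)

// shannonBits returns the entropy lower bound, in bits, for encoding
// symbols with the given histogram: sum of v * -log2(v/total). The
// vendored EstimatedBits computes the same sum with mFastLog2.
func shannonBits(hist []int) float64 {
	total := 0
	for _, v := range hist {
		total += v
	}
	bits := 0.0
	for _, v := range hist {
		if v > 0 {
			bits += float64(v) * -math.Log2(float64(v)/float64(total))
		}
	}
	return bits
}

func main() {
	// A skewed 8-symbol histogram needs far less than 8*3=24 fixed bits.
	fmt.Printf("%.1f bits\n", shannonBits([]int{5, 1, 1, 1, 0, 0, 0, 0}))
}
```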
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { + if debugDeflate { + if xlength >= maxMatchLength+baseMatchLength { + panic(fmt.Errorf("invalid length: %v", xlength)) + } + if xoffset >= maxMatchOffset+baseMatchOffset { + panic(fmt.Errorf("invalid offset: %v", xoffset)) + } + } + oCode := offsetCode(xoffset) + xoffset |= oCode << 16 + + t.extraHist[lengthCodes1[uint8(xlength)]]++ + t.offHist[oCode&31]++ + t.tokens[t.n] = token(matchType | xlength<= maxMatchOffset+baseMatchOffset { + panic(fmt.Errorf("invalid offset: %v", xoffset)) + } + } + oc := offsetCode(xoffset) + xoffset |= oc << 16 + for xlength > 0 { + xl := xlength + if xl > 258 { + // We need to have at least baseMatchLength left over for next loop. + if xl > 258+baseMatchLength { + xl = 258 + } else { + xl = 258 - baseMatchLength + } + } + xlength -= xl + xl -= baseMatchLength + t.extraHist[lengthCodes1[uint8(xl)]]++ + t.offHist[oc&31]++ + t.tokens[t.n] = token(matchType | uint32(xl)<> lengthShift) } + +// Convert length to code. +func lengthCode(len uint8) uint8 { return lengthCodes[len] } + +// Returns the offset code corresponding to a specific offset +func offsetCode(off uint32) uint32 { + if false { + if off < uint32(len(offsetCodes)) { + return offsetCodes[off&255] + } else if off>>7 < uint32(len(offsetCodes)) { + return offsetCodes[(off>>7)&255] + 14 + } else { + return offsetCodes[(off>>14)&255] + 28 + } + } + if off < uint32(len(offsetCodes)) { + return offsetCodes[uint8(off)] + } + return offsetCodes14[uint8(off>>7)] +} diff --git a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteLexer.interp b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteLexer.interp index 1670a80ae6..2784293c31 100644 --- a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteLexer.interp +++ b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteLexer.interp @@ -1,598 +1,598 @@ -token literal names: -null -';' -'.' 
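token.go, whose re-added hunk ends above, packs each literal or match into 32 bits per the layout comment at the top of the file: offset and its 5-bit offset code in the low 22 bits, extra length in bits 22-29, and the type tag in bits 30-31. A hypothetical round-trip sketch, with the constants reconstructed from that comment:

```go
package main

import "fmt"

// Reconstructed from token.go's layout comment: type in bits 30-31,
// xlength in bits 22-29, xoffset (with its offset code) below that.
const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	matchType   = 1 << 30
)

type token uint32

func newMatchToken(xlength, xoffset uint32) token {
	return token(matchType | xlength<<lengthShift | xoffset)
}

// length truncates away the type bits; offset masks off length and type.
func (t token) length() uint8  { return uint8(t >> lengthShift) }
func (t token) offset() uint32 { return uint32(t) & offsetMask }

func main() {
	t := newMatchToken(42, 1234)
	fmt.Println(t.length(), t.offset()) // 42 1234
}
```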
-'(' -')' -',' -'=' -'*' -'+' -'-' -'~' -'||' -'/' -'%' -'<<' -'>>' -'&' -'|' -'<' -'<=' -'>' -'>=' -'==' -'!=' -'<>' -'ABORT' -'ACTION' -'ADD' -'AFTER' -'ALL' -'ALTER' -'ANALYZE' -'AND' -'AS' -'ASC' -'ATTACH' -'AUTOINCREMENT' -'BEFORE' -'BEGIN' -'BETWEEN' -'BY' -'CASCADE' -'CASE' -'CAST' -'CHECK' -'COLLATE' -'COLUMN' -'COMMIT' -'CONFLICT' -'CONSTRAINT' -'CREATE' -'CROSS' -'CURRENT_DATE' -'CURRENT_TIME' -'CURRENT_TIMESTAMP' -'DATABASE' -'DEFAULT' -'DEFERRABLE' -'DEFERRED' -'DELETE' -'DESC' -'DETACH' -'DISTINCT' -'DROP' -'EACH' -'ELSE' -'END' -'ESCAPE' -'EXCEPT' -'EXCLUSIVE' -'EXISTS' -'EXPLAIN' -'FAIL' -'FOR' -'FOREIGN' -'FROM' -'FULL' -'GLOB' -'GROUP' -'HAVING' -'IF' -'IGNORE' -'IMMEDIATE' -'IN' -'INDEX' -'INDEXED' -'INITIALLY' -'INNER' -'INSERT' -'INSTEAD' -'INTERSECT' -'INTO' -'IS' -'ISNULL' -'JOIN' -'KEY' -'LEFT' -'LIKE' -'LIMIT' -'MATCH' -'NATURAL' -'NO' -'NOT' -'NOTNULL' -'NULL' -'OF' -'OFFSET' -'ON' -'OR' -'ORDER' -'OUTER' -'PLAN' -'PRAGMA' -'PRIMARY' -'QUERY' -'RAISE' -'RECURSIVE' -'REFERENCES' -'REGEXP' -'REINDEX' -'RELEASE' -'RENAME' -'REPLACE' -'RESTRICT' -'RETURNING' -'RIGHT' -'ROLLBACK' -'ROW' -'ROWS' -'SAVEPOINT' -'SELECT' -'SET' -'TABLE' -'TEMP' -'TEMPORARY' -'THEN' -'TO' -'TRANSACTION' -'TRIGGER' -'UNION' -'UNIQUE' -'UPDATE' -'USING' -'VACUUM' -'VALUES' -'VIEW' -'VIRTUAL' -'WHEN' -'WHERE' -'WITH' -'WITHOUT' -'FIRST_VALUE' -'OVER' -'PARTITION' -'RANGE' -'PRECEDING' -'UNBOUNDED' -'CURRENT' -'FOLLOWING' -'CUME_DIST' -'DENSE_RANK' -'LAG' -'LAST_VALUE' -'LEAD' -'NTH_VALUE' -'NTILE' -'PERCENT_RANK' -'RANK' -'ROW_NUMBER' -'GENERATED' -'ALWAYS' -'STORED' -'TRUE' -'FALSE' -'WINDOW' -'NULLS' -'FIRST' -'LAST' -'FILTER' -'GROUPS' -'EXCLUDE' -'TIES' -'OTHERS' -'DO' -'NOTHING' -null -null -null -null -null -null -null -null -null - -token symbolic names: -null -SCOL -DOT -OPEN_PAR -CLOSE_PAR -COMMA -ASSIGN -STAR -PLUS -MINUS -TILDE -PIPE2 -DIV -MOD -LT2 -GT2 -AMP -PIPE -LT -LT_EQ -GT -GT_EQ -EQ -NOT_EQ1 -NOT_EQ2 -ABORT_ -ACTION_ -ADD_ -AFTER_ -ALL_ -ALTER_ -ANALYZE_ -AND_ -AS_ -ASC_ -ATTACH_ -AUTOINCREMENT_ -BEFORE_ -BEGIN_ -BETWEEN_ -BY_ -CASCADE_ -CASE_ -CAST_ -CHECK_ -COLLATE_ -COLUMN_ -COMMIT_ -CONFLICT_ -CONSTRAINT_ -CREATE_ -CROSS_ -CURRENT_DATE_ -CURRENT_TIME_ -CURRENT_TIMESTAMP_ -DATABASE_ -DEFAULT_ -DEFERRABLE_ -DEFERRED_ -DELETE_ -DESC_ -DETACH_ -DISTINCT_ -DROP_ -EACH_ -ELSE_ -END_ -ESCAPE_ -EXCEPT_ -EXCLUSIVE_ -EXISTS_ -EXPLAIN_ -FAIL_ -FOR_ -FOREIGN_ -FROM_ -FULL_ -GLOB_ -GROUP_ -HAVING_ -IF_ -IGNORE_ -IMMEDIATE_ -IN_ -INDEX_ -INDEXED_ -INITIALLY_ -INNER_ -INSERT_ -INSTEAD_ -INTERSECT_ -INTO_ -IS_ -ISNULL_ -JOIN_ -KEY_ -LEFT_ -LIKE_ -LIMIT_ -MATCH_ -NATURAL_ -NO_ -NOT_ -NOTNULL_ -NULL_ -OF_ -OFFSET_ -ON_ -OR_ -ORDER_ -OUTER_ -PLAN_ -PRAGMA_ -PRIMARY_ -QUERY_ -RAISE_ -RECURSIVE_ -REFERENCES_ -REGEXP_ -REINDEX_ -RELEASE_ -RENAME_ -REPLACE_ -RESTRICT_ -RETURNING_ -RIGHT_ -ROLLBACK_ -ROW_ -ROWS_ -SAVEPOINT_ -SELECT_ -SET_ -TABLE_ -TEMP_ -TEMPORARY_ -THEN_ -TO_ -TRANSACTION_ -TRIGGER_ -UNION_ -UNIQUE_ -UPDATE_ -USING_ -VACUUM_ -VALUES_ -VIEW_ -VIRTUAL_ -WHEN_ -WHERE_ -WITH_ -WITHOUT_ -FIRST_VALUE_ -OVER_ -PARTITION_ -RANGE_ -PRECEDING_ -UNBOUNDED_ -CURRENT_ -FOLLOWING_ -CUME_DIST_ -DENSE_RANK_ -LAG_ -LAST_VALUE_ -LEAD_ -NTH_VALUE_ -NTILE_ -PERCENT_RANK_ -RANK_ -ROW_NUMBER_ -GENERATED_ -ALWAYS_ -STORED_ -TRUE_ -FALSE_ -WINDOW_ -NULLS_ -FIRST_ -LAST_ -FILTER_ -GROUPS_ -EXCLUDE_ -TIES_ -OTHERS_ -DO_ -NOTHING_ -IDENTIFIER -NUMERIC_LITERAL -BIND_PARAMETER -STRING_LITERAL -BLOB_LITERAL -SINGLE_LINE_COMMENT -MULTILINE_COMMENT -SPACES -UNEXPECTED_CHAR - -rule names: -SCOL -DOT -OPEN_PAR 
-CLOSE_PAR -COMMA -ASSIGN -STAR -PLUS -MINUS -TILDE -PIPE2 -DIV -MOD -LT2 -GT2 -AMP -PIPE -LT -LT_EQ -GT -GT_EQ -EQ -NOT_EQ1 -NOT_EQ2 -ABORT_ -ACTION_ -ADD_ -AFTER_ -ALL_ -ALTER_ -ANALYZE_ -AND_ -AS_ -ASC_ -ATTACH_ -AUTOINCREMENT_ -BEFORE_ -BEGIN_ -BETWEEN_ -BY_ -CASCADE_ -CASE_ -CAST_ -CHECK_ -COLLATE_ -COLUMN_ -COMMIT_ -CONFLICT_ -CONSTRAINT_ -CREATE_ -CROSS_ -CURRENT_DATE_ -CURRENT_TIME_ -CURRENT_TIMESTAMP_ -DATABASE_ -DEFAULT_ -DEFERRABLE_ -DEFERRED_ -DELETE_ -DESC_ -DETACH_ -DISTINCT_ -DROP_ -EACH_ -ELSE_ -END_ -ESCAPE_ -EXCEPT_ -EXCLUSIVE_ -EXISTS_ -EXPLAIN_ -FAIL_ -FOR_ -FOREIGN_ -FROM_ -FULL_ -GLOB_ -GROUP_ -HAVING_ -IF_ -IGNORE_ -IMMEDIATE_ -IN_ -INDEX_ -INDEXED_ -INITIALLY_ -INNER_ -INSERT_ -INSTEAD_ -INTERSECT_ -INTO_ -IS_ -ISNULL_ -JOIN_ -KEY_ -LEFT_ -LIKE_ -LIMIT_ -MATCH_ -NATURAL_ -NO_ -NOT_ -NOTNULL_ -NULL_ -OF_ -OFFSET_ -ON_ -OR_ -ORDER_ -OUTER_ -PLAN_ -PRAGMA_ -PRIMARY_ -QUERY_ -RAISE_ -RECURSIVE_ -REFERENCES_ -REGEXP_ -REINDEX_ -RELEASE_ -RENAME_ -REPLACE_ -RESTRICT_ -RETURNING_ -RIGHT_ -ROLLBACK_ -ROW_ -ROWS_ -SAVEPOINT_ -SELECT_ -SET_ -TABLE_ -TEMP_ -TEMPORARY_ -THEN_ -TO_ -TRANSACTION_ -TRIGGER_ -UNION_ -UNIQUE_ -UPDATE_ -USING_ -VACUUM_ -VALUES_ -VIEW_ -VIRTUAL_ -WHEN_ -WHERE_ -WITH_ -WITHOUT_ -FIRST_VALUE_ -OVER_ -PARTITION_ -RANGE_ -PRECEDING_ -UNBOUNDED_ -CURRENT_ -FOLLOWING_ -CUME_DIST_ -DENSE_RANK_ -LAG_ -LAST_VALUE_ -LEAD_ -NTH_VALUE_ -NTILE_ -PERCENT_RANK_ -RANK_ -ROW_NUMBER_ -GENERATED_ -ALWAYS_ -STORED_ -TRUE_ -FALSE_ -WINDOW_ -NULLS_ -FIRST_ -LAST_ -FILTER_ -GROUPS_ -EXCLUDE_ -TIES_ -OTHERS_ -DO_ -NOTHING_ -IDENTIFIER -NUMERIC_LITERAL -BIND_PARAMETER -STRING_LITERAL -BLOB_LITERAL -SINGLE_LINE_COMMENT -MULTILINE_COMMENT -SPACES -UNEXPECTED_CHAR -HEX_DIGIT -DIGIT - -channel names: -DEFAULT_TOKEN_CHANNEL -HIDDEN - -mode names: -DEFAULT_MODE - -atn: +token literal names: +null +';' +'.' 
+'(' +')' +',' +'=' +'*' +'+' +'-' +'~' +'||' +'/' +'%' +'<<' +'>>' +'&' +'|' +'<' +'<=' +'>' +'>=' +'==' +'!=' +'<>' +'ABORT' +'ACTION' +'ADD' +'AFTER' +'ALL' +'ALTER' +'ANALYZE' +'AND' +'AS' +'ASC' +'ATTACH' +'AUTOINCREMENT' +'BEFORE' +'BEGIN' +'BETWEEN' +'BY' +'CASCADE' +'CASE' +'CAST' +'CHECK' +'COLLATE' +'COLUMN' +'COMMIT' +'CONFLICT' +'CONSTRAINT' +'CREATE' +'CROSS' +'CURRENT_DATE' +'CURRENT_TIME' +'CURRENT_TIMESTAMP' +'DATABASE' +'DEFAULT' +'DEFERRABLE' +'DEFERRED' +'DELETE' +'DESC' +'DETACH' +'DISTINCT' +'DROP' +'EACH' +'ELSE' +'END' +'ESCAPE' +'EXCEPT' +'EXCLUSIVE' +'EXISTS' +'EXPLAIN' +'FAIL' +'FOR' +'FOREIGN' +'FROM' +'FULL' +'GLOB' +'GROUP' +'HAVING' +'IF' +'IGNORE' +'IMMEDIATE' +'IN' +'INDEX' +'INDEXED' +'INITIALLY' +'INNER' +'INSERT' +'INSTEAD' +'INTERSECT' +'INTO' +'IS' +'ISNULL' +'JOIN' +'KEY' +'LEFT' +'LIKE' +'LIMIT' +'MATCH' +'NATURAL' +'NO' +'NOT' +'NOTNULL' +'NULL' +'OF' +'OFFSET' +'ON' +'OR' +'ORDER' +'OUTER' +'PLAN' +'PRAGMA' +'PRIMARY' +'QUERY' +'RAISE' +'RECURSIVE' +'REFERENCES' +'REGEXP' +'REINDEX' +'RELEASE' +'RENAME' +'REPLACE' +'RESTRICT' +'RETURNING' +'RIGHT' +'ROLLBACK' +'ROW' +'ROWS' +'SAVEPOINT' +'SELECT' +'SET' +'TABLE' +'TEMP' +'TEMPORARY' +'THEN' +'TO' +'TRANSACTION' +'TRIGGER' +'UNION' +'UNIQUE' +'UPDATE' +'USING' +'VACUUM' +'VALUES' +'VIEW' +'VIRTUAL' +'WHEN' +'WHERE' +'WITH' +'WITHOUT' +'FIRST_VALUE' +'OVER' +'PARTITION' +'RANGE' +'PRECEDING' +'UNBOUNDED' +'CURRENT' +'FOLLOWING' +'CUME_DIST' +'DENSE_RANK' +'LAG' +'LAST_VALUE' +'LEAD' +'NTH_VALUE' +'NTILE' +'PERCENT_RANK' +'RANK' +'ROW_NUMBER' +'GENERATED' +'ALWAYS' +'STORED' +'TRUE' +'FALSE' +'WINDOW' +'NULLS' +'FIRST' +'LAST' +'FILTER' +'GROUPS' +'EXCLUDE' +'TIES' +'OTHERS' +'DO' +'NOTHING' +null +null +null +null +null +null +null +null +null + +token symbolic names: +null +SCOL +DOT +OPEN_PAR +CLOSE_PAR +COMMA +ASSIGN +STAR +PLUS +MINUS +TILDE +PIPE2 +DIV +MOD +LT2 +GT2 +AMP +PIPE +LT +LT_EQ +GT +GT_EQ +EQ +NOT_EQ1 +NOT_EQ2 +ABORT_ +ACTION_ +ADD_ +AFTER_ +ALL_ +ALTER_ +ANALYZE_ +AND_ +AS_ +ASC_ +ATTACH_ +AUTOINCREMENT_ +BEFORE_ +BEGIN_ +BETWEEN_ +BY_ +CASCADE_ +CASE_ +CAST_ +CHECK_ +COLLATE_ +COLUMN_ +COMMIT_ +CONFLICT_ +CONSTRAINT_ +CREATE_ +CROSS_ +CURRENT_DATE_ +CURRENT_TIME_ +CURRENT_TIMESTAMP_ +DATABASE_ +DEFAULT_ +DEFERRABLE_ +DEFERRED_ +DELETE_ +DESC_ +DETACH_ +DISTINCT_ +DROP_ +EACH_ +ELSE_ +END_ +ESCAPE_ +EXCEPT_ +EXCLUSIVE_ +EXISTS_ +EXPLAIN_ +FAIL_ +FOR_ +FOREIGN_ +FROM_ +FULL_ +GLOB_ +GROUP_ +HAVING_ +IF_ +IGNORE_ +IMMEDIATE_ +IN_ +INDEX_ +INDEXED_ +INITIALLY_ +INNER_ +INSERT_ +INSTEAD_ +INTERSECT_ +INTO_ +IS_ +ISNULL_ +JOIN_ +KEY_ +LEFT_ +LIKE_ +LIMIT_ +MATCH_ +NATURAL_ +NO_ +NOT_ +NOTNULL_ +NULL_ +OF_ +OFFSET_ +ON_ +OR_ +ORDER_ +OUTER_ +PLAN_ +PRAGMA_ +PRIMARY_ +QUERY_ +RAISE_ +RECURSIVE_ +REFERENCES_ +REGEXP_ +REINDEX_ +RELEASE_ +RENAME_ +REPLACE_ +RESTRICT_ +RETURNING_ +RIGHT_ +ROLLBACK_ +ROW_ +ROWS_ +SAVEPOINT_ +SELECT_ +SET_ +TABLE_ +TEMP_ +TEMPORARY_ +THEN_ +TO_ +TRANSACTION_ +TRIGGER_ +UNION_ +UNIQUE_ +UPDATE_ +USING_ +VACUUM_ +VALUES_ +VIEW_ +VIRTUAL_ +WHEN_ +WHERE_ +WITH_ +WITHOUT_ +FIRST_VALUE_ +OVER_ +PARTITION_ +RANGE_ +PRECEDING_ +UNBOUNDED_ +CURRENT_ +FOLLOWING_ +CUME_DIST_ +DENSE_RANK_ +LAG_ +LAST_VALUE_ +LEAD_ +NTH_VALUE_ +NTILE_ +PERCENT_RANK_ +RANK_ +ROW_NUMBER_ +GENERATED_ +ALWAYS_ +STORED_ +TRUE_ +FALSE_ +WINDOW_ +NULLS_ +FIRST_ +LAST_ +FILTER_ +GROUPS_ +EXCLUDE_ +TIES_ +OTHERS_ +DO_ +NOTHING_ +IDENTIFIER +NUMERIC_LITERAL +BIND_PARAMETER +STRING_LITERAL +BLOB_LITERAL +SINGLE_LINE_COMMENT +MULTILINE_COMMENT +SPACES +UNEXPECTED_CHAR + +rule names: +SCOL +DOT +OPEN_PAR 
+CLOSE_PAR +COMMA +ASSIGN +STAR +PLUS +MINUS +TILDE +PIPE2 +DIV +MOD +LT2 +GT2 +AMP +PIPE +LT +LT_EQ +GT +GT_EQ +EQ +NOT_EQ1 +NOT_EQ2 +ABORT_ +ACTION_ +ADD_ +AFTER_ +ALL_ +ALTER_ +ANALYZE_ +AND_ +AS_ +ASC_ +ATTACH_ +AUTOINCREMENT_ +BEFORE_ +BEGIN_ +BETWEEN_ +BY_ +CASCADE_ +CASE_ +CAST_ +CHECK_ +COLLATE_ +COLUMN_ +COMMIT_ +CONFLICT_ +CONSTRAINT_ +CREATE_ +CROSS_ +CURRENT_DATE_ +CURRENT_TIME_ +CURRENT_TIMESTAMP_ +DATABASE_ +DEFAULT_ +DEFERRABLE_ +DEFERRED_ +DELETE_ +DESC_ +DETACH_ +DISTINCT_ +DROP_ +EACH_ +ELSE_ +END_ +ESCAPE_ +EXCEPT_ +EXCLUSIVE_ +EXISTS_ +EXPLAIN_ +FAIL_ +FOR_ +FOREIGN_ +FROM_ +FULL_ +GLOB_ +GROUP_ +HAVING_ +IF_ +IGNORE_ +IMMEDIATE_ +IN_ +INDEX_ +INDEXED_ +INITIALLY_ +INNER_ +INSERT_ +INSTEAD_ +INTERSECT_ +INTO_ +IS_ +ISNULL_ +JOIN_ +KEY_ +LEFT_ +LIKE_ +LIMIT_ +MATCH_ +NATURAL_ +NO_ +NOT_ +NOTNULL_ +NULL_ +OF_ +OFFSET_ +ON_ +OR_ +ORDER_ +OUTER_ +PLAN_ +PRAGMA_ +PRIMARY_ +QUERY_ +RAISE_ +RECURSIVE_ +REFERENCES_ +REGEXP_ +REINDEX_ +RELEASE_ +RENAME_ +REPLACE_ +RESTRICT_ +RETURNING_ +RIGHT_ +ROLLBACK_ +ROW_ +ROWS_ +SAVEPOINT_ +SELECT_ +SET_ +TABLE_ +TEMP_ +TEMPORARY_ +THEN_ +TO_ +TRANSACTION_ +TRIGGER_ +UNION_ +UNIQUE_ +UPDATE_ +USING_ +VACUUM_ +VALUES_ +VIEW_ +VIRTUAL_ +WHEN_ +WHERE_ +WITH_ +WITHOUT_ +FIRST_VALUE_ +OVER_ +PARTITION_ +RANGE_ +PRECEDING_ +UNBOUNDED_ +CURRENT_ +FOLLOWING_ +CUME_DIST_ +DENSE_RANK_ +LAG_ +LAST_VALUE_ +LEAD_ +NTH_VALUE_ +NTILE_ +PERCENT_RANK_ +RANK_ +ROW_NUMBER_ +GENERATED_ +ALWAYS_ +STORED_ +TRUE_ +FALSE_ +WINDOW_ +NULLS_ +FIRST_ +LAST_ +FILTER_ +GROUPS_ +EXCLUDE_ +TIES_ +OTHERS_ +DO_ +NOTHING_ +IDENTIFIER +NUMERIC_LITERAL +BIND_PARAMETER +STRING_LITERAL +BLOB_LITERAL +SINGLE_LINE_COMMENT +MULTILINE_COMMENT +SPACES +UNEXPECTED_CHAR +HEX_DIGIT +DIGIT + +channel names: +DEFAULT_TOKEN_CHANNEL +HIDDEN + +mode names: +DEFAULT_MODE + +atn: [4, 0, 193, 1704, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 
121, 7, 121, 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, 2, 194, 7, 194, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 39, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 1, 41, 1, 41, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 62, 
1, 62, 1, 62, 1, 62, 1, 62, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 71, 1, 71, 1, 71, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 74, 1, 74, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 76, 1, 76, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 78, 1, 79, 1, 79, 1, 79, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 82, 1, 82, 1, 82, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 86, 1, 86, 1, 86, 1, 86, 1, 86, 1, 86, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 92, 1, 92, 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 96, 1, 96, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 98, 1, 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, 103, 1, 104, 1, 104, 1, 104, 1, 105, 1, 105, 1, 105, 1, 105, 1, 105, 1, 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 107, 1, 107, 1, 107, 1, 108, 1, 108, 1, 108, 1, 108, 1, 108, 1, 108, 1, 109, 1, 109, 1, 109, 1, 109, 1, 109, 1, 109, 1, 110, 1, 110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, 111, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 113, 1, 113, 1, 113, 1, 114, 1, 114, 1, 114, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 126, 1, 126, 1, 126, 1, 126, 1, 127, 1, 127, 1, 127, 1, 127, 1, 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 
136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, 138, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, 139, 1, 139, 1, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 140, 1, 140, 1, 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, 142, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 146, 1, 146, 1, 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 171, 1, 171, 1, 171, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 174, 1, 174, 1, 174, 1, 174, 1, 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 176, 1, 176, 1, 176, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 180, 1, 180, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 184, 5, 184, 1562, 8, 184, 10, 184, 12, 184, 1565, 9, 184, 1, 184, 1, 184, 1, 184, 1, 184, 1, 184, 5, 184, 1572, 8, 184, 10, 184, 12, 184, 1575, 9, 184, 1, 184, 1, 184, 1, 184, 5, 184, 1580, 8, 184, 10, 184, 12, 184, 1583, 9, 184, 1, 184, 1, 184, 1, 184, 5, 184, 1588, 8, 184, 10, 184, 12, 184, 1591, 9, 184, 3, 184, 1593, 8, 184, 1, 185, 4, 185, 1596, 8, 185, 11, 185, 12, 185, 1597, 1, 185, 1, 185, 5, 185, 1602, 8, 185, 10, 185, 12, 185, 1605, 9, 185, 3, 185, 1607, 8, 185, 1, 185, 1, 185, 4, 185, 1611, 8, 185, 11, 185, 12, 185, 1612, 3, 185, 1615, 8, 185, 1, 185, 1, 185, 3, 185, 1619, 8, 185, 1, 185, 4, 185, 1622, 8, 185, 11, 185, 12, 185, 1623, 3, 185, 1626, 8, 185, 1, 185, 1, 185, 1, 185, 1, 185, 4, 185, 1632, 
8, 185, 11, 185, 12, 185, 1633, 3, 185, 1636, 8, 185, 1, 186, 1, 186, 5, 186, 1640, 8, 186, 10, 186, 12, 186, 1643, 9, 186, 1, 186, 1, 186, 3, 186, 1647, 8, 186, 1, 187, 1, 187, 1, 187, 1, 187, 5, 187, 1653, 8, 187, 10, 187, 12, 187, 1656, 9, 187, 1, 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 5, 189, 1667, 8, 189, 10, 189, 12, 189, 1670, 9, 189, 1, 189, 3, 189, 1673, 8, 189, 1, 189, 1, 189, 3, 189, 1677, 8, 189, 1, 189, 1, 189, 1, 190, 1, 190, 1, 190, 1, 190, 5, 190, 1685, 8, 190, 10, 190, 12, 190, 1688, 9, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 191, 1, 191, 1, 192, 1, 192, 1, 193, 1, 193, 1, 194, 1, 194, 1, 1686, 0, 195, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 29, 59, 30, 61, 31, 63, 32, 65, 33, 67, 34, 69, 35, 71, 36, 73, 37, 75, 38, 77, 39, 79, 40, 81, 41, 83, 42, 85, 43, 87, 44, 89, 45, 91, 46, 93, 47, 95, 48, 97, 49, 99, 50, 101, 51, 103, 52, 105, 53, 107, 54, 109, 55, 111, 56, 113, 57, 115, 58, 117, 59, 119, 60, 121, 61, 123, 62, 125, 63, 127, 64, 129, 65, 131, 66, 133, 67, 135, 68, 137, 69, 139, 70, 141, 71, 143, 72, 145, 73, 147, 74, 149, 75, 151, 76, 153, 77, 155, 78, 157, 79, 159, 80, 161, 81, 163, 82, 165, 83, 167, 84, 169, 85, 171, 86, 173, 87, 175, 88, 177, 89, 179, 90, 181, 91, 183, 92, 185, 93, 187, 94, 189, 95, 191, 96, 193, 97, 195, 98, 197, 99, 199, 100, 201, 101, 203, 102, 205, 103, 207, 104, 209, 105, 211, 106, 213, 107, 215, 108, 217, 109, 219, 110, 221, 111, 223, 112, 225, 113, 227, 114, 229, 115, 231, 116, 233, 117, 235, 118, 237, 119, 239, 120, 241, 121, 243, 122, 245, 123, 247, 124, 249, 125, 251, 126, 253, 127, 255, 128, 257, 129, 259, 130, 261, 131, 263, 132, 265, 133, 267, 134, 269, 135, 271, 136, 273, 137, 275, 138, 277, 139, 279, 140, 281, 141, 283, 142, 285, 143, 287, 144, 289, 145, 291, 146, 293, 147, 295, 148, 297, 149, 299, 150, 301, 151, 303, 152, 305, 153, 307, 154, 309, 155, 311, 156, 313, 157, 315, 158, 317, 159, 319, 160, 321, 161, 323, 162, 325, 163, 327, 164, 329, 165, 331, 166, 333, 167, 335, 168, 337, 169, 339, 170, 341, 171, 343, 172, 345, 173, 347, 174, 349, 175, 351, 176, 353, 177, 355, 178, 357, 179, 359, 180, 361, 181, 363, 182, 365, 183, 367, 184, 369, 185, 371, 186, 373, 187, 375, 188, 377, 189, 379, 190, 381, 191, 383, 192, 385, 193, 387, 0, 389, 0, 1, 0, 38, 2, 0, 65, 65, 97, 97, 2, 0, 66, 66, 98, 98, 2, 0, 79, 79, 111, 111, 2, 0, 82, 82, 114, 114, 2, 0, 84, 84, 116, 116, 2, 0, 67, 67, 99, 99, 2, 0, 73, 73, 105, 105, 2, 0, 78, 78, 110, 110, 2, 0, 68, 68, 100, 100, 2, 0, 70, 70, 102, 102, 2, 0, 69, 69, 101, 101, 2, 0, 76, 76, 108, 108, 2, 0, 89, 89, 121, 121, 2, 0, 90, 90, 122, 122, 2, 0, 83, 83, 115, 115, 2, 0, 72, 72, 104, 104, 2, 0, 85, 85, 117, 117, 2, 0, 77, 77, 109, 109, 2, 0, 71, 71, 103, 103, 2, 0, 87, 87, 119, 119, 2, 0, 75, 75, 107, 107, 2, 0, 80, 80, 112, 112, 2, 0, 88, 88, 120, 120, 2, 0, 86, 86, 118, 118, 2, 0, 74, 74, 106, 106, 2, 0, 81, 81, 113, 113, 1, 0, 34, 34, 1, 0, 96, 96, 1, 0, 93, 93, 3, 0, 65, 90, 95, 95, 97, 122, 4, 0, 48, 57, 65, 90, 95, 95, 97, 122, 2, 0, 43, 43, 45, 45, 3, 0, 36, 36, 58, 58, 64, 64, 1, 0, 39, 39, 2, 0, 10, 10, 13, 13, 3, 0, 9, 11, 13, 13, 32, 32, 3, 0, 48, 57, 65, 70, 97, 102, 1, 0, 48, 57, 1728, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 
0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0, 63, 1, 0, 0, 0, 0, 65, 1, 0, 0, 0, 0, 67, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 0, 71, 1, 0, 0, 0, 0, 73, 1, 0, 0, 0, 0, 75, 1, 0, 0, 0, 0, 77, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 0, 95, 1, 0, 0, 0, 0, 97, 1, 0, 0, 0, 0, 99, 1, 0, 0, 0, 0, 101, 1, 0, 0, 0, 0, 103, 1, 0, 0, 0, 0, 105, 1, 0, 0, 0, 0, 107, 1, 0, 0, 0, 0, 109, 1, 0, 0, 0, 0, 111, 1, 0, 0, 0, 0, 113, 1, 0, 0, 0, 0, 115, 1, 0, 0, 0, 0, 117, 1, 0, 0, 0, 0, 119, 1, 0, 0, 0, 0, 121, 1, 0, 0, 0, 0, 123, 1, 0, 0, 0, 0, 125, 1, 0, 0, 0, 0, 127, 1, 0, 0, 0, 0, 129, 1, 0, 0, 0, 0, 131, 1, 0, 0, 0, 0, 133, 1, 0, 0, 0, 0, 135, 1, 0, 0, 0, 0, 137, 1, 0, 0, 0, 0, 139, 1, 0, 0, 0, 0, 141, 1, 0, 0, 0, 0, 143, 1, 0, 0, 0, 0, 145, 1, 0, 0, 0, 0, 147, 1, 0, 0, 0, 0, 149, 1, 0, 0, 0, 0, 151, 1, 0, 0, 0, 0, 153, 1, 0, 0, 0, 0, 155, 1, 0, 0, 0, 0, 157, 1, 0, 0, 0, 0, 159, 1, 0, 0, 0, 0, 161, 1, 0, 0, 0, 0, 163, 1, 0, 0, 0, 0, 165, 1, 0, 0, 0, 0, 167, 1, 0, 0, 0, 0, 169, 1, 0, 0, 0, 0, 171, 1, 0, 0, 0, 0, 173, 1, 0, 0, 0, 0, 175, 1, 0, 0, 0, 0, 177, 1, 0, 0, 0, 0, 179, 1, 0, 0, 0, 0, 181, 1, 0, 0, 0, 0, 183, 1, 0, 0, 0, 0, 185, 1, 0, 0, 0, 0, 187, 1, 0, 0, 0, 0, 189, 1, 0, 0, 0, 0, 191, 1, 0, 0, 0, 0, 193, 1, 0, 0, 0, 0, 195, 1, 0, 0, 0, 0, 197, 1, 0, 0, 0, 0, 199, 1, 0, 0, 0, 0, 201, 1, 0, 0, 0, 0, 203, 1, 0, 0, 0, 0, 205, 1, 0, 0, 0, 0, 207, 1, 0, 0, 0, 0, 209, 1, 0, 0, 0, 0, 211, 1, 0, 0, 0, 0, 213, 1, 0, 0, 0, 0, 215, 1, 0, 0, 0, 0, 217, 1, 0, 0, 0, 0, 219, 1, 0, 0, 0, 0, 221, 1, 0, 0, 0, 0, 223, 1, 0, 0, 0, 0, 225, 1, 0, 0, 0, 0, 227, 1, 0, 0, 0, 0, 229, 1, 0, 0, 0, 0, 231, 1, 0, 0, 0, 0, 233, 1, 0, 0, 0, 0, 235, 1, 0, 0, 0, 0, 237, 1, 0, 0, 0, 0, 239, 1, 0, 0, 0, 0, 241, 1, 0, 0, 0, 0, 243, 1, 0, 0, 0, 0, 245, 1, 0, 0, 0, 0, 247, 1, 0, 0, 0, 0, 249, 1, 0, 0, 0, 0, 251, 1, 0, 0, 0, 0, 253, 1, 0, 0, 0, 0, 255, 1, 0, 0, 0, 0, 257, 1, 0, 0, 0, 0, 259, 1, 0, 0, 0, 0, 261, 1, 0, 0, 0, 0, 263, 1, 0, 0, 0, 0, 265, 1, 0, 0, 0, 0, 267, 1, 0, 0, 0, 0, 269, 1, 0, 0, 0, 0, 271, 1, 0, 0, 0, 0, 273, 1, 0, 0, 0, 0, 275, 1, 0, 0, 0, 0, 277, 1, 0, 0, 0, 0, 279, 1, 0, 0, 0, 0, 281, 1, 0, 0, 0, 0, 283, 1, 0, 0, 0, 0, 285, 1, 0, 0, 0, 0, 287, 1, 0, 0, 0, 0, 289, 1, 0, 0, 0, 0, 291, 1, 0, 0, 0, 0, 293, 1, 0, 0, 0, 0, 295, 1, 0, 0, 0, 0, 297, 1, 0, 0, 0, 0, 299, 1, 0, 0, 0, 0, 301, 1, 0, 0, 0, 0, 303, 1, 0, 0, 0, 0, 305, 1, 0, 0, 0, 0, 307, 1, 0, 0, 0, 0, 309, 1, 0, 0, 0, 0, 311, 1, 0, 0, 0, 0, 313, 1, 0, 0, 0, 0, 315, 1, 0, 0, 0, 0, 317, 1, 0, 0, 0, 0, 319, 1, 0, 0, 0, 0, 321, 1, 0, 0, 0, 0, 323, 1, 0, 0, 0, 0, 325, 1, 0, 0, 0, 0, 327, 1, 0, 0, 0, 0, 329, 1, 0, 0, 0, 0, 331, 1, 0, 0, 0, 0, 333, 1, 0, 0, 0, 0, 335, 1, 0, 0, 0, 0, 337, 1, 0, 0, 0, 0, 339, 1, 0, 0, 0, 0, 341, 1, 0, 0, 0, 0, 343, 1, 0, 0, 0, 0, 345, 1, 0, 0, 0, 0, 347, 1, 0, 0, 0, 0, 349, 1, 0, 0, 0, 0, 351, 1, 0, 0, 0, 0, 353, 1, 0, 0, 0, 0, 355, 1, 0, 0, 0, 0, 357, 1, 0, 0, 0, 0, 359, 1, 0, 0, 0, 0, 361, 1, 0, 0, 0, 0, 363, 1, 0, 0, 0, 0, 365, 1, 0, 0, 0, 0, 367, 1, 0, 0, 0, 0, 369, 1, 0, 0, 0, 0, 371, 1, 0, 0, 0, 0, 373, 1, 0, 0, 0, 0, 375, 1, 0, 0, 0, 0, 377, 1, 0, 0, 0, 0, 379, 1, 
0, 0, 0, 0, 381, 1, 0, 0, 0, 0, 383, 1, 0, 0, 0, 0, 385, 1, 0, 0, 0, 1, 391, 1, 0, 0, 0, 3, 393, 1, 0, 0, 0, 5, 395, 1, 0, 0, 0, 7, 397, 1, 0, 0, 0, 9, 399, 1, 0, 0, 0, 11, 401, 1, 0, 0, 0, 13, 403, 1, 0, 0, 0, 15, 405, 1, 0, 0, 0, 17, 407, 1, 0, 0, 0, 19, 409, 1, 0, 0, 0, 21, 411, 1, 0, 0, 0, 23, 414, 1, 0, 0, 0, 25, 416, 1, 0, 0, 0, 27, 418, 1, 0, 0, 0, 29, 421, 1, 0, 0, 0, 31, 424, 1, 0, 0, 0, 33, 426, 1, 0, 0, 0, 35, 428, 1, 0, 0, 0, 37, 430, 1, 0, 0, 0, 39, 433, 1, 0, 0, 0, 41, 435, 1, 0, 0, 0, 43, 438, 1, 0, 0, 0, 45, 441, 1, 0, 0, 0, 47, 444, 1, 0, 0, 0, 49, 447, 1, 0, 0, 0, 51, 453, 1, 0, 0, 0, 53, 460, 1, 0, 0, 0, 55, 464, 1, 0, 0, 0, 57, 470, 1, 0, 0, 0, 59, 474, 1, 0, 0, 0, 61, 480, 1, 0, 0, 0, 63, 488, 1, 0, 0, 0, 65, 492, 1, 0, 0, 0, 67, 495, 1, 0, 0, 0, 69, 499, 1, 0, 0, 0, 71, 506, 1, 0, 0, 0, 73, 520, 1, 0, 0, 0, 75, 527, 1, 0, 0, 0, 77, 533, 1, 0, 0, 0, 79, 541, 1, 0, 0, 0, 81, 544, 1, 0, 0, 0, 83, 552, 1, 0, 0, 0, 85, 557, 1, 0, 0, 0, 87, 562, 1, 0, 0, 0, 89, 568, 1, 0, 0, 0, 91, 576, 1, 0, 0, 0, 93, 583, 1, 0, 0, 0, 95, 590, 1, 0, 0, 0, 97, 599, 1, 0, 0, 0, 99, 610, 1, 0, 0, 0, 101, 617, 1, 0, 0, 0, 103, 623, 1, 0, 0, 0, 105, 636, 1, 0, 0, 0, 107, 649, 1, 0, 0, 0, 109, 667, 1, 0, 0, 0, 111, 676, 1, 0, 0, 0, 113, 684, 1, 0, 0, 0, 115, 695, 1, 0, 0, 0, 117, 704, 1, 0, 0, 0, 119, 711, 1, 0, 0, 0, 121, 716, 1, 0, 0, 0, 123, 723, 1, 0, 0, 0, 125, 732, 1, 0, 0, 0, 127, 737, 1, 0, 0, 0, 129, 742, 1, 0, 0, 0, 131, 747, 1, 0, 0, 0, 133, 751, 1, 0, 0, 0, 135, 758, 1, 0, 0, 0, 137, 765, 1, 0, 0, 0, 139, 775, 1, 0, 0, 0, 141, 782, 1, 0, 0, 0, 143, 790, 1, 0, 0, 0, 145, 795, 1, 0, 0, 0, 147, 799, 1, 0, 0, 0, 149, 807, 1, 0, 0, 0, 151, 812, 1, 0, 0, 0, 153, 817, 1, 0, 0, 0, 155, 822, 1, 0, 0, 0, 157, 828, 1, 0, 0, 0, 159, 835, 1, 0, 0, 0, 161, 838, 1, 0, 0, 0, 163, 845, 1, 0, 0, 0, 165, 855, 1, 0, 0, 0, 167, 858, 1, 0, 0, 0, 169, 864, 1, 0, 0, 0, 171, 872, 1, 0, 0, 0, 173, 882, 1, 0, 0, 0, 175, 888, 1, 0, 0, 0, 177, 895, 1, 0, 0, 0, 179, 903, 1, 0, 0, 0, 181, 913, 1, 0, 0, 0, 183, 918, 1, 0, 0, 0, 185, 921, 1, 0, 0, 0, 187, 928, 1, 0, 0, 0, 189, 933, 1, 0, 0, 0, 191, 937, 1, 0, 0, 0, 193, 942, 1, 0, 0, 0, 195, 947, 1, 0, 0, 0, 197, 953, 1, 0, 0, 0, 199, 959, 1, 0, 0, 0, 201, 967, 1, 0, 0, 0, 203, 970, 1, 0, 0, 0, 205, 974, 1, 0, 0, 0, 207, 982, 1, 0, 0, 0, 209, 987, 1, 0, 0, 0, 211, 990, 1, 0, 0, 0, 213, 997, 1, 0, 0, 0, 215, 1000, 1, 0, 0, 0, 217, 1003, 1, 0, 0, 0, 219, 1009, 1, 0, 0, 0, 221, 1015, 1, 0, 0, 0, 223, 1020, 1, 0, 0, 0, 225, 1027, 1, 0, 0, 0, 227, 1035, 1, 0, 0, 0, 229, 1041, 1, 0, 0, 0, 231, 1047, 1, 0, 0, 0, 233, 1057, 1, 0, 0, 0, 235, 1068, 1, 0, 0, 0, 237, 1075, 1, 0, 0, 0, 239, 1083, 1, 0, 0, 0, 241, 1091, 1, 0, 0, 0, 243, 1098, 1, 0, 0, 0, 245, 1106, 1, 0, 0, 0, 247, 1115, 1, 0, 0, 0, 249, 1125, 1, 0, 0, 0, 251, 1131, 1, 0, 0, 0, 253, 1140, 1, 0, 0, 0, 255, 1144, 1, 0, 0, 0, 257, 1149, 1, 0, 0, 0, 259, 1159, 1, 0, 0, 0, 261, 1166, 1, 0, 0, 0, 263, 1170, 1, 0, 0, 0, 265, 1176, 1, 0, 0, 0, 267, 1181, 1, 0, 0, 0, 269, 1191, 1, 0, 0, 0, 271, 1196, 1, 0, 0, 0, 273, 1199, 1, 0, 0, 0, 275, 1211, 1, 0, 0, 0, 277, 1219, 1, 0, 0, 0, 279, 1225, 1, 0, 0, 0, 281, 1232, 1, 0, 0, 0, 283, 1239, 1, 0, 0, 0, 285, 1245, 1, 0, 0, 0, 287, 1252, 1, 0, 0, 0, 289, 1259, 1, 0, 0, 0, 291, 1264, 1, 0, 0, 0, 293, 1272, 1, 0, 0, 0, 295, 1277, 1, 0, 0, 0, 297, 1283, 1, 0, 0, 0, 299, 1288, 1, 0, 0, 0, 301, 1296, 1, 0, 0, 0, 303, 1308, 1, 0, 0, 0, 305, 1313, 1, 0, 0, 0, 307, 1323, 1, 0, 0, 0, 309, 1329, 1, 0, 0, 0, 311, 1339, 1, 0, 0, 0, 313, 1349, 1, 0, 0, 0, 315, 1357, 1, 0, 0, 0, 317, 1367, 1, 
0, 0, 0, 319, 1377, 1, 0, 0, 0, 321, 1388, 1, 0, 0, 0, 323, 1392, 1, 0, 0, 0, 325, 1403, 1, 0, 0, 0, 327, 1408, 1, 0, 0, 0, 329, 1418, 1, 0, 0, 0, 331, 1424, 1, 0, 0, 0, 333, 1437, 1, 0, 0, 0, 335, 1442, 1, 0, 0, 0, 337, 1453, 1, 0, 0, 0, 339, 1463, 1, 0, 0, 0, 341, 1470, 1, 0, 0, 0, 343, 1477, 1, 0, 0, 0, 345, 1482, 1, 0, 0, 0, 347, 1488, 1, 0, 0, 0, 349, 1495, 1, 0, 0, 0, 351, 1501, 1, 0, 0, 0, 353, 1507, 1, 0, 0, 0, 355, 1512, 1, 0, 0, 0, 357, 1519, 1, 0, 0, 0, 359, 1526, 1, 0, 0, 0, 361, 1534, 1, 0, 0, 0, 363, 1539, 1, 0, 0, 0, 365, 1546, 1, 0, 0, 0, 367, 1549, 1, 0, 0, 0, 369, 1592, 1, 0, 0, 0, 371, 1635, 1, 0, 0, 0, 373, 1646, 1, 0, 0, 0, 375, 1648, 1, 0, 0, 0, 377, 1659, 1, 0, 0, 0, 379, 1662, 1, 0, 0, 0, 381, 1680, 1, 0, 0, 0, 383, 1694, 1, 0, 0, 0, 385, 1698, 1, 0, 0, 0, 387, 1700, 1, 0, 0, 0, 389, 1702, 1, 0, 0, 0, 391, 392, 5, 59, 0, 0, 392, 2, 1, 0, 0, 0, 393, 394, 5, 46, 0, 0, 394, 4, 1, 0, 0, 0, 395, 396, 5, 40, 0, 0, 396, 6, 1, 0, 0, 0, 397, 398, 5, 41, 0, 0, 398, 8, 1, 0, 0, 0, 399, 400, 5, 44, 0, 0, 400, 10, 1, 0, 0, 0, 401, 402, 5, 61, 0, 0, 402, 12, 1, 0, 0, 0, 403, 404, 5, 42, 0, 0, 404, 14, 1, 0, 0, 0, 405, 406, 5, 43, 0, 0, 406, 16, 1, 0, 0, 0, 407, 408, 5, 45, 0, 0, 408, 18, 1, 0, 0, 0, 409, 410, 5, 126, 0, 0, 410, 20, 1, 0, 0, 0, 411, 412, 5, 124, 0, 0, 412, 413, 5, 124, 0, 0, 413, 22, 1, 0, 0, 0, 414, 415, 5, 47, 0, 0, 415, 24, 1, 0, 0, 0, 416, 417, 5, 37, 0, 0, 417, 26, 1, 0, 0, 0, 418, 419, 5, 60, 0, 0, 419, 420, 5, 60, 0, 0, 420, 28, 1, 0, 0, 0, 421, 422, 5, 62, 0, 0, 422, 423, 5, 62, 0, 0, 423, 30, 1, 0, 0, 0, 424, 425, 5, 38, 0, 0, 425, 32, 1, 0, 0, 0, 426, 427, 5, 124, 0, 0, 427, 34, 1, 0, 0, 0, 428, 429, 5, 60, 0, 0, 429, 36, 1, 0, 0, 0, 430, 431, 5, 60, 0, 0, 431, 432, 5, 61, 0, 0, 432, 38, 1, 0, 0, 0, 433, 434, 5, 62, 0, 0, 434, 40, 1, 0, 0, 0, 435, 436, 5, 62, 0, 0, 436, 437, 5, 61, 0, 0, 437, 42, 1, 0, 0, 0, 438, 439, 5, 61, 0, 0, 439, 440, 5, 61, 0, 0, 440, 44, 1, 0, 0, 0, 441, 442, 5, 33, 0, 0, 442, 443, 5, 61, 0, 0, 443, 46, 1, 0, 0, 0, 444, 445, 5, 60, 0, 0, 445, 446, 5, 62, 0, 0, 446, 48, 1, 0, 0, 0, 447, 448, 7, 0, 0, 0, 448, 449, 7, 1, 0, 0, 449, 450, 7, 2, 0, 0, 450, 451, 7, 3, 0, 0, 451, 452, 7, 4, 0, 0, 452, 50, 1, 0, 0, 0, 453, 454, 7, 0, 0, 0, 454, 455, 7, 5, 0, 0, 455, 456, 7, 4, 0, 0, 456, 457, 7, 6, 0, 0, 457, 458, 7, 2, 0, 0, 458, 459, 7, 7, 0, 0, 459, 52, 1, 0, 0, 0, 460, 461, 7, 0, 0, 0, 461, 462, 7, 8, 0, 0, 462, 463, 7, 8, 0, 0, 463, 54, 1, 0, 0, 0, 464, 465, 7, 0, 0, 0, 465, 466, 7, 9, 0, 0, 466, 467, 7, 4, 0, 0, 467, 468, 7, 10, 0, 0, 468, 469, 7, 3, 0, 0, 469, 56, 1, 0, 0, 0, 470, 471, 7, 0, 0, 0, 471, 472, 7, 11, 0, 0, 472, 473, 7, 11, 0, 0, 473, 58, 1, 0, 0, 0, 474, 475, 7, 0, 0, 0, 475, 476, 7, 11, 0, 0, 476, 477, 7, 4, 0, 0, 477, 478, 7, 10, 0, 0, 478, 479, 7, 3, 0, 0, 479, 60, 1, 0, 0, 0, 480, 481, 7, 0, 0, 0, 481, 482, 7, 7, 0, 0, 482, 483, 7, 0, 0, 0, 483, 484, 7, 11, 0, 0, 484, 485, 7, 12, 0, 0, 485, 486, 7, 13, 0, 0, 486, 487, 7, 10, 0, 0, 487, 62, 1, 0, 0, 0, 488, 489, 7, 0, 0, 0, 489, 490, 7, 7, 0, 0, 490, 491, 7, 8, 0, 0, 491, 64, 1, 0, 0, 0, 492, 493, 7, 0, 0, 0, 493, 494, 7, 14, 0, 0, 494, 66, 1, 0, 0, 0, 495, 496, 7, 0, 0, 0, 496, 497, 7, 14, 0, 0, 497, 498, 7, 5, 0, 0, 498, 68, 1, 0, 0, 0, 499, 500, 7, 0, 0, 0, 500, 501, 7, 4, 0, 0, 501, 502, 7, 4, 0, 0, 502, 503, 7, 0, 0, 0, 503, 504, 7, 5, 0, 0, 504, 505, 7, 15, 0, 0, 505, 70, 1, 0, 0, 0, 506, 507, 7, 0, 0, 0, 507, 508, 7, 16, 0, 0, 508, 509, 7, 4, 0, 0, 509, 510, 7, 2, 0, 0, 510, 511, 7, 6, 0, 0, 511, 512, 7, 7, 0, 0, 512, 513, 7, 5, 0, 0, 513, 514, 7, 3, 0, 0, 
514, 515, 7, 10, 0, 0, 515, 516, 7, 17, 0, 0, 516, 517, 7, 10, 0, 0, 517, 518, 7, 7, 0, 0, 518, 519, 7, 4, 0, 0, 519, 72, 1, 0, 0, 0, 520, 521, 7, 1, 0, 0, 521, 522, 7, 10, 0, 0, 522, 523, 7, 9, 0, 0, 523, 524, 7, 2, 0, 0, 524, 525, 7, 3, 0, 0, 525, 526, 7, 10, 0, 0, 526, 74, 1, 0, 0, 0, 527, 528, 7, 1, 0, 0, 528, 529, 7, 10, 0, 0, 529, 530, 7, 18, 0, 0, 530, 531, 7, 6, 0, 0, 531, 532, 7, 7, 0, 0, 532, 76, 1, 0, 0, 0, 533, 534, 7, 1, 0, 0, 534, 535, 7, 10, 0, 0, 535, 536, 7, 4, 0, 0, 536, 537, 7, 19, 0, 0, 537, 538, 7, 10, 0, 0, 538, 539, 7, 10, 0, 0, 539, 540, 7, 7, 0, 0, 540, 78, 1, 0, 0, 0, 541, 542, 7, 1, 0, 0, 542, 543, 7, 12, 0, 0, 543, 80, 1, 0, 0, 0, 544, 545, 7, 5, 0, 0, 545, 546, 7, 0, 0, 0, 546, 547, 7, 14, 0, 0, 547, 548, 7, 5, 0, 0, 548, 549, 7, 0, 0, 0, 549, 550, 7, 8, 0, 0, 550, 551, 7, 10, 0, 0, 551, 82, 1, 0, 0, 0, 552, 553, 7, 5, 0, 0, 553, 554, 7, 0, 0, 0, 554, 555, 7, 14, 0, 0, 555, 556, 7, 10, 0, 0, 556, 84, 1, 0, 0, 0, 557, 558, 7, 5, 0, 0, 558, 559, 7, 0, 0, 0, 559, 560, 7, 14, 0, 0, 560, 561, 7, 4, 0, 0, 561, 86, 1, 0, 0, 0, 562, 563, 7, 5, 0, 0, 563, 564, 7, 15, 0, 0, 564, 565, 7, 10, 0, 0, 565, 566, 7, 5, 0, 0, 566, 567, 7, 20, 0, 0, 567, 88, 1, 0, 0, 0, 568, 569, 7, 5, 0, 0, 569, 570, 7, 2, 0, 0, 570, 571, 7, 11, 0, 0, 571, 572, 7, 11, 0, 0, 572, 573, 7, 0, 0, 0, 573, 574, 7, 4, 0, 0, 574, 575, 7, 10, 0, 0, 575, 90, 1, 0, 0, 0, 576, 577, 7, 5, 0, 0, 577, 578, 7, 2, 0, 0, 578, 579, 7, 11, 0, 0, 579, 580, 7, 16, 0, 0, 580, 581, 7, 17, 0, 0, 581, 582, 7, 7, 0, 0, 582, 92, 1, 0, 0, 0, 583, 584, 7, 5, 0, 0, 584, 585, 7, 2, 0, 0, 585, 586, 7, 17, 0, 0, 586, 587, 7, 17, 0, 0, 587, 588, 7, 6, 0, 0, 588, 589, 7, 4, 0, 0, 589, 94, 1, 0, 0, 0, 590, 591, 7, 5, 0, 0, 591, 592, 7, 2, 0, 0, 592, 593, 7, 7, 0, 0, 593, 594, 7, 9, 0, 0, 594, 595, 7, 11, 0, 0, 595, 596, 7, 6, 0, 0, 596, 597, 7, 5, 0, 0, 597, 598, 7, 4, 0, 0, 598, 96, 1, 0, 0, 0, 599, 600, 7, 5, 0, 0, 600, 601, 7, 2, 0, 0, 601, 602, 7, 7, 0, 0, 602, 603, 7, 14, 0, 0, 603, 604, 7, 4, 0, 0, 604, 605, 7, 3, 0, 0, 605, 606, 7, 0, 0, 0, 606, 607, 7, 6, 0, 0, 607, 608, 7, 7, 0, 0, 608, 609, 7, 4, 0, 0, 609, 98, 1, 0, 0, 0, 610, 611, 7, 5, 0, 0, 611, 612, 7, 3, 0, 0, 612, 613, 7, 10, 0, 0, 613, 614, 7, 0, 0, 0, 614, 615, 7, 4, 0, 0, 615, 616, 7, 10, 0, 0, 616, 100, 1, 0, 0, 0, 617, 618, 7, 5, 0, 0, 618, 619, 7, 3, 0, 0, 619, 620, 7, 2, 0, 0, 620, 621, 7, 14, 0, 0, 621, 622, 7, 14, 0, 0, 622, 102, 1, 0, 0, 0, 623, 624, 7, 5, 0, 0, 624, 625, 7, 16, 0, 0, 625, 626, 7, 3, 0, 0, 626, 627, 7, 3, 0, 0, 627, 628, 7, 10, 0, 0, 628, 629, 7, 7, 0, 0, 629, 630, 7, 4, 0, 0, 630, 631, 5, 95, 0, 0, 631, 632, 7, 8, 0, 0, 632, 633, 7, 0, 0, 0, 633, 634, 7, 4, 0, 0, 634, 635, 7, 10, 0, 0, 635, 104, 1, 0, 0, 0, 636, 637, 7, 5, 0, 0, 637, 638, 7, 16, 0, 0, 638, 639, 7, 3, 0, 0, 639, 640, 7, 3, 0, 0, 640, 641, 7, 10, 0, 0, 641, 642, 7, 7, 0, 0, 642, 643, 7, 4, 0, 0, 643, 644, 5, 95, 0, 0, 644, 645, 7, 4, 0, 0, 645, 646, 7, 6, 0, 0, 646, 647, 7, 17, 0, 0, 647, 648, 7, 10, 0, 0, 648, 106, 1, 0, 0, 0, 649, 650, 7, 5, 0, 0, 650, 651, 7, 16, 0, 0, 651, 652, 7, 3, 0, 0, 652, 653, 7, 3, 0, 0, 653, 654, 7, 10, 0, 0, 654, 655, 7, 7, 0, 0, 655, 656, 7, 4, 0, 0, 656, 657, 5, 95, 0, 0, 657, 658, 7, 4, 0, 0, 658, 659, 7, 6, 0, 0, 659, 660, 7, 17, 0, 0, 660, 661, 7, 10, 0, 0, 661, 662, 7, 14, 0, 0, 662, 663, 7, 4, 0, 0, 663, 664, 7, 0, 0, 0, 664, 665, 7, 17, 0, 0, 665, 666, 7, 21, 0, 0, 666, 108, 1, 0, 0, 0, 667, 668, 7, 8, 0, 0, 668, 669, 7, 0, 0, 0, 669, 670, 7, 4, 0, 0, 670, 671, 7, 0, 0, 0, 671, 672, 7, 1, 0, 0, 672, 673, 7, 0, 0, 0, 673, 674, 7, 14, 0, 
0, 674, 675, 7, 10, 0, 0, 675, 110, 1, 0, 0, 0, 676, 677, 7, 8, 0, 0, 677, 678, 7, 10, 0, 0, 678, 679, 7, 9, 0, 0, 679, 680, 7, 0, 0, 0, 680, 681, 7, 16, 0, 0, 681, 682, 7, 11, 0, 0, 682, 683, 7, 4, 0, 0, 683, 112, 1, 0, 0, 0, 684, 685, 7, 8, 0, 0, 685, 686, 7, 10, 0, 0, 686, 687, 7, 9, 0, 0, 687, 688, 7, 10, 0, 0, 688, 689, 7, 3, 0, 0, 689, 690, 7, 3, 0, 0, 690, 691, 7, 0, 0, 0, 691, 692, 7, 1, 0, 0, 692, 693, 7, 11, 0, 0, 693, 694, 7, 10, 0, 0, 694, 114, 1, 0, 0, 0, 695, 696, 7, 8, 0, 0, 696, 697, 7, 10, 0, 0, 697, 698, 7, 9, 0, 0, 698, 699, 7, 10, 0, 0, 699, 700, 7, 3, 0, 0, 700, 701, 7, 3, 0, 0, 701, 702, 7, 10, 0, 0, 702, 703, 7, 8, 0, 0, 703, 116, 1, 0, 0, 0, 704, 705, 7, 8, 0, 0, 705, 706, 7, 10, 0, 0, 706, 707, 7, 11, 0, 0, 707, 708, 7, 10, 0, 0, 708, 709, 7, 4, 0, 0, 709, 710, 7, 10, 0, 0, 710, 118, 1, 0, 0, 0, 711, 712, 7, 8, 0, 0, 712, 713, 7, 10, 0, 0, 713, 714, 7, 14, 0, 0, 714, 715, 7, 5, 0, 0, 715, 120, 1, 0, 0, 0, 716, 717, 7, 8, 0, 0, 717, 718, 7, 10, 0, 0, 718, 719, 7, 4, 0, 0, 719, 720, 7, 0, 0, 0, 720, 721, 7, 5, 0, 0, 721, 722, 7, 15, 0, 0, 722, 122, 1, 0, 0, 0, 723, 724, 7, 8, 0, 0, 724, 725, 7, 6, 0, 0, 725, 726, 7, 14, 0, 0, 726, 727, 7, 4, 0, 0, 727, 728, 7, 6, 0, 0, 728, 729, 7, 7, 0, 0, 729, 730, 7, 5, 0, 0, 730, 731, 7, 4, 0, 0, 731, 124, 1, 0, 0, 0, 732, 733, 7, 8, 0, 0, 733, 734, 7, 3, 0, 0, 734, 735, 7, 2, 0, 0, 735, 736, 7, 21, 0, 0, 736, 126, 1, 0, 0, 0, 737, 738, 7, 10, 0, 0, 738, 739, 7, 0, 0, 0, 739, 740, 7, 5, 0, 0, 740, 741, 7, 15, 0, 0, 741, 128, 1, 0, 0, 0, 742, 743, 7, 10, 0, 0, 743, 744, 7, 11, 0, 0, 744, 745, 7, 14, 0, 0, 745, 746, 7, 10, 0, 0, 746, 130, 1, 0, 0, 0, 747, 748, 7, 10, 0, 0, 748, 749, 7, 7, 0, 0, 749, 750, 7, 8, 0, 0, 750, 132, 1, 0, 0, 0, 751, 752, 7, 10, 0, 0, 752, 753, 7, 14, 0, 0, 753, 754, 7, 5, 0, 0, 754, 755, 7, 0, 0, 0, 755, 756, 7, 21, 0, 0, 756, 757, 7, 10, 0, 0, 757, 134, 1, 0, 0, 0, 758, 759, 7, 10, 0, 0, 759, 760, 7, 22, 0, 0, 760, 761, 7, 5, 0, 0, 761, 762, 7, 10, 0, 0, 762, 763, 7, 21, 0, 0, 763, 764, 7, 4, 0, 0, 764, 136, 1, 0, 0, 0, 765, 766, 7, 10, 0, 0, 766, 767, 7, 22, 0, 0, 767, 768, 7, 5, 0, 0, 768, 769, 7, 11, 0, 0, 769, 770, 7, 16, 0, 0, 770, 771, 7, 14, 0, 0, 771, 772, 7, 6, 0, 0, 772, 773, 7, 23, 0, 0, 773, 774, 7, 10, 0, 0, 774, 138, 1, 0, 0, 0, 775, 776, 7, 10, 0, 0, 776, 777, 7, 22, 0, 0, 777, 778, 7, 6, 0, 0, 778, 779, 7, 14, 0, 0, 779, 780, 7, 4, 0, 0, 780, 781, 7, 14, 0, 0, 781, 140, 1, 0, 0, 0, 782, 783, 7, 10, 0, 0, 783, 784, 7, 22, 0, 0, 784, 785, 7, 21, 0, 0, 785, 786, 7, 11, 0, 0, 786, 787, 7, 0, 0, 0, 787, 788, 7, 6, 0, 0, 788, 789, 7, 7, 0, 0, 789, 142, 1, 0, 0, 0, 790, 791, 7, 9, 0, 0, 791, 792, 7, 0, 0, 0, 792, 793, 7, 6, 0, 0, 793, 794, 7, 11, 0, 0, 794, 144, 1, 0, 0, 0, 795, 796, 7, 9, 0, 0, 796, 797, 7, 2, 0, 0, 797, 798, 7, 3, 0, 0, 798, 146, 1, 0, 0, 0, 799, 800, 7, 9, 0, 0, 800, 801, 7, 2, 0, 0, 801, 802, 7, 3, 0, 0, 802, 803, 7, 10, 0, 0, 803, 804, 7, 6, 0, 0, 804, 805, 7, 18, 0, 0, 805, 806, 7, 7, 0, 0, 806, 148, 1, 0, 0, 0, 807, 808, 7, 9, 0, 0, 808, 809, 7, 3, 0, 0, 809, 810, 7, 2, 0, 0, 810, 811, 7, 17, 0, 0, 811, 150, 1, 0, 0, 0, 812, 813, 7, 9, 0, 0, 813, 814, 7, 16, 0, 0, 814, 815, 7, 11, 0, 0, 815, 816, 7, 11, 0, 0, 816, 152, 1, 0, 0, 0, 817, 818, 7, 18, 0, 0, 818, 819, 7, 11, 0, 0, 819, 820, 7, 2, 0, 0, 820, 821, 7, 1, 0, 0, 821, 154, 1, 0, 0, 0, 822, 823, 7, 18, 0, 0, 823, 824, 7, 3, 0, 0, 824, 825, 7, 2, 0, 0, 825, 826, 7, 16, 0, 0, 826, 827, 7, 21, 0, 0, 827, 156, 1, 0, 0, 0, 828, 829, 7, 15, 0, 0, 829, 830, 7, 0, 0, 0, 830, 831, 7, 23, 0, 0, 831, 832, 7, 6, 0, 0, 832, 833, 
7, 7, 0, 0, 833, 834, 7, 18, 0, 0, 834, 158, 1, 0, 0, 0, 835, 836, 7, 6, 0, 0, 836, 837, 7, 9, 0, 0, 837, 160, 1, 0, 0, 0, 838, 839, 7, 6, 0, 0, 839, 840, 7, 18, 0, 0, 840, 841, 7, 7, 0, 0, 841, 842, 7, 2, 0, 0, 842, 843, 7, 3, 0, 0, 843, 844, 7, 10, 0, 0, 844, 162, 1, 0, 0, 0, 845, 846, 7, 6, 0, 0, 846, 847, 7, 17, 0, 0, 847, 848, 7, 17, 0, 0, 848, 849, 7, 10, 0, 0, 849, 850, 7, 8, 0, 0, 850, 851, 7, 6, 0, 0, 851, 852, 7, 0, 0, 0, 852, 853, 7, 4, 0, 0, 853, 854, 7, 10, 0, 0, 854, 164, 1, 0, 0, 0, 855, 856, 7, 6, 0, 0, 856, 857, 7, 7, 0, 0, 857, 166, 1, 0, 0, 0, 858, 859, 7, 6, 0, 0, 859, 860, 7, 7, 0, 0, 860, 861, 7, 8, 0, 0, 861, 862, 7, 10, 0, 0, 862, 863, 7, 22, 0, 0, 863, 168, 1, 0, 0, 0, 864, 865, 7, 6, 0, 0, 865, 866, 7, 7, 0, 0, 866, 867, 7, 8, 0, 0, 867, 868, 7, 10, 0, 0, 868, 869, 7, 22, 0, 0, 869, 870, 7, 10, 0, 0, 870, 871, 7, 8, 0, 0, 871, 170, 1, 0, 0, 0, 872, 873, 7, 6, 0, 0, 873, 874, 7, 7, 0, 0, 874, 875, 7, 6, 0, 0, 875, 876, 7, 4, 0, 0, 876, 877, 7, 6, 0, 0, 877, 878, 7, 0, 0, 0, 878, 879, 7, 11, 0, 0, 879, 880, 7, 11, 0, 0, 880, 881, 7, 12, 0, 0, 881, 172, 1, 0, 0, 0, 882, 883, 7, 6, 0, 0, 883, 884, 7, 7, 0, 0, 884, 885, 7, 7, 0, 0, 885, 886, 7, 10, 0, 0, 886, 887, 7, 3, 0, 0, 887, 174, 1, 0, 0, 0, 888, 889, 7, 6, 0, 0, 889, 890, 7, 7, 0, 0, 890, 891, 7, 14, 0, 0, 891, 892, 7, 10, 0, 0, 892, 893, 7, 3, 0, 0, 893, 894, 7, 4, 0, 0, 894, 176, 1, 0, 0, 0, 895, 896, 7, 6, 0, 0, 896, 897, 7, 7, 0, 0, 897, 898, 7, 14, 0, 0, 898, 899, 7, 4, 0, 0, 899, 900, 7, 10, 0, 0, 900, 901, 7, 0, 0, 0, 901, 902, 7, 8, 0, 0, 902, 178, 1, 0, 0, 0, 903, 904, 7, 6, 0, 0, 904, 905, 7, 7, 0, 0, 905, 906, 7, 4, 0, 0, 906, 907, 7, 10, 0, 0, 907, 908, 7, 3, 0, 0, 908, 909, 7, 14, 0, 0, 909, 910, 7, 10, 0, 0, 910, 911, 7, 5, 0, 0, 911, 912, 7, 4, 0, 0, 912, 180, 1, 0, 0, 0, 913, 914, 7, 6, 0, 0, 914, 915, 7, 7, 0, 0, 915, 916, 7, 4, 0, 0, 916, 917, 7, 2, 0, 0, 917, 182, 1, 0, 0, 0, 918, 919, 7, 6, 0, 0, 919, 920, 7, 14, 0, 0, 920, 184, 1, 0, 0, 0, 921, 922, 7, 6, 0, 0, 922, 923, 7, 14, 0, 0, 923, 924, 7, 7, 0, 0, 924, 925, 7, 16, 0, 0, 925, 926, 7, 11, 0, 0, 926, 927, 7, 11, 0, 0, 927, 186, 1, 0, 0, 0, 928, 929, 7, 24, 0, 0, 929, 930, 7, 2, 0, 0, 930, 931, 7, 6, 0, 0, 931, 932, 7, 7, 0, 0, 932, 188, 1, 0, 0, 0, 933, 934, 7, 20, 0, 0, 934, 935, 7, 10, 0, 0, 935, 936, 7, 12, 0, 0, 936, 190, 1, 0, 0, 0, 937, 938, 7, 11, 0, 0, 938, 939, 7, 10, 0, 0, 939, 940, 7, 9, 0, 0, 940, 941, 7, 4, 0, 0, 941, 192, 1, 0, 0, 0, 942, 943, 7, 11, 0, 0, 943, 944, 7, 6, 0, 0, 944, 945, 7, 20, 0, 0, 945, 946, 7, 10, 0, 0, 946, 194, 1, 0, 0, 0, 947, 948, 7, 11, 0, 0, 948, 949, 7, 6, 0, 0, 949, 950, 7, 17, 0, 0, 950, 951, 7, 6, 0, 0, 951, 952, 7, 4, 0, 0, 952, 196, 1, 0, 0, 0, 953, 954, 7, 17, 0, 0, 954, 955, 7, 0, 0, 0, 955, 956, 7, 4, 0, 0, 956, 957, 7, 5, 0, 0, 957, 958, 7, 15, 0, 0, 958, 198, 1, 0, 0, 0, 959, 960, 7, 7, 0, 0, 960, 961, 7, 0, 0, 0, 961, 962, 7, 4, 0, 0, 962, 963, 7, 16, 0, 0, 963, 964, 7, 3, 0, 0, 964, 965, 7, 0, 0, 0, 965, 966, 7, 11, 0, 0, 966, 200, 1, 0, 0, 0, 967, 968, 7, 7, 0, 0, 968, 969, 7, 2, 0, 0, 969, 202, 1, 0, 0, 0, 970, 971, 7, 7, 0, 0, 971, 972, 7, 2, 0, 0, 972, 973, 7, 4, 0, 0, 973, 204, 1, 0, 0, 0, 974, 975, 7, 7, 0, 0, 975, 976, 7, 2, 0, 0, 976, 977, 7, 4, 0, 0, 977, 978, 7, 7, 0, 0, 978, 979, 7, 16, 0, 0, 979, 980, 7, 11, 0, 0, 980, 981, 7, 11, 0, 0, 981, 206, 1, 0, 0, 0, 982, 983, 7, 7, 0, 0, 983, 984, 7, 16, 0, 0, 984, 985, 7, 11, 0, 0, 985, 986, 7, 11, 0, 0, 986, 208, 1, 0, 0, 0, 987, 988, 7, 2, 0, 0, 988, 989, 7, 9, 0, 0, 989, 210, 1, 0, 0, 0, 990, 991, 7, 2, 0, 0, 991, 992, 7, 9, 
0, 0, 992, 993, 7, 9, 0, 0, 993, 994, 7, 14, 0, 0, 994, 995, 7, 10, 0, 0, 995, 996, 7, 4, 0, 0, 996, 212, 1, 0, 0, 0, 997, 998, 7, 2, 0, 0, 998, 999, 7, 7, 0, 0, 999, 214, 1, 0, 0, 0, 1000, 1001, 7, 2, 0, 0, 1001, 1002, 7, 3, 0, 0, 1002, 216, 1, 0, 0, 0, 1003, 1004, 7, 2, 0, 0, 1004, 1005, 7, 3, 0, 0, 1005, 1006, 7, 8, 0, 0, 1006, 1007, 7, 10, 0, 0, 1007, 1008, 7, 3, 0, 0, 1008, 218, 1, 0, 0, 0, 1009, 1010, 7, 2, 0, 0, 1010, 1011, 7, 16, 0, 0, 1011, 1012, 7, 4, 0, 0, 1012, 1013, 7, 10, 0, 0, 1013, 1014, 7, 3, 0, 0, 1014, 220, 1, 0, 0, 0, 1015, 1016, 7, 21, 0, 0, 1016, 1017, 7, 11, 0, 0, 1017, 1018, 7, 0, 0, 0, 1018, 1019, 7, 7, 0, 0, 1019, 222, 1, 0, 0, 0, 1020, 1021, 7, 21, 0, 0, 1021, 1022, 7, 3, 0, 0, 1022, 1023, 7, 0, 0, 0, 1023, 1024, 7, 18, 0, 0, 1024, 1025, 7, 17, 0, 0, 1025, 1026, 7, 0, 0, 0, 1026, 224, 1, 0, 0, 0, 1027, 1028, 7, 21, 0, 0, 1028, 1029, 7, 3, 0, 0, 1029, 1030, 7, 6, 0, 0, 1030, 1031, 7, 17, 0, 0, 1031, 1032, 7, 0, 0, 0, 1032, 1033, 7, 3, 0, 0, 1033, 1034, 7, 12, 0, 0, 1034, 226, 1, 0, 0, 0, 1035, 1036, 7, 25, 0, 0, 1036, 1037, 7, 16, 0, 0, 1037, 1038, 7, 10, 0, 0, 1038, 1039, 7, 3, 0, 0, 1039, 1040, 7, 12, 0, 0, 1040, 228, 1, 0, 0, 0, 1041, 1042, 7, 3, 0, 0, 1042, 1043, 7, 0, 0, 0, 1043, 1044, 7, 6, 0, 0, 1044, 1045, 7, 14, 0, 0, 1045, 1046, 7, 10, 0, 0, 1046, 230, 1, 0, 0, 0, 1047, 1048, 7, 3, 0, 0, 1048, 1049, 7, 10, 0, 0, 1049, 1050, 7, 5, 0, 0, 1050, 1051, 7, 16, 0, 0, 1051, 1052, 7, 3, 0, 0, 1052, 1053, 7, 14, 0, 0, 1053, 1054, 7, 6, 0, 0, 1054, 1055, 7, 23, 0, 0, 1055, 1056, 7, 10, 0, 0, 1056, 232, 1, 0, 0, 0, 1057, 1058, 7, 3, 0, 0, 1058, 1059, 7, 10, 0, 0, 1059, 1060, 7, 9, 0, 0, 1060, 1061, 7, 10, 0, 0, 1061, 1062, 7, 3, 0, 0, 1062, 1063, 7, 10, 0, 0, 1063, 1064, 7, 7, 0, 0, 1064, 1065, 7, 5, 0, 0, 1065, 1066, 7, 10, 0, 0, 1066, 1067, 7, 14, 0, 0, 1067, 234, 1, 0, 0, 0, 1068, 1069, 7, 3, 0, 0, 1069, 1070, 7, 10, 0, 0, 1070, 1071, 7, 18, 0, 0, 1071, 1072, 7, 10, 0, 0, 1072, 1073, 7, 22, 0, 0, 1073, 1074, 7, 21, 0, 0, 1074, 236, 1, 0, 0, 0, 1075, 1076, 7, 3, 0, 0, 1076, 1077, 7, 10, 0, 0, 1077, 1078, 7, 6, 0, 0, 1078, 1079, 7, 7, 0, 0, 1079, 1080, 7, 8, 0, 0, 1080, 1081, 7, 10, 0, 0, 1081, 1082, 7, 22, 0, 0, 1082, 238, 1, 0, 0, 0, 1083, 1084, 7, 3, 0, 0, 1084, 1085, 7, 10, 0, 0, 1085, 1086, 7, 11, 0, 0, 1086, 1087, 7, 10, 0, 0, 1087, 1088, 7, 0, 0, 0, 1088, 1089, 7, 14, 0, 0, 1089, 1090, 7, 10, 0, 0, 1090, 240, 1, 0, 0, 0, 1091, 1092, 7, 3, 0, 0, 1092, 1093, 7, 10, 0, 0, 1093, 1094, 7, 7, 0, 0, 1094, 1095, 7, 0, 0, 0, 1095, 1096, 7, 17, 0, 0, 1096, 1097, 7, 10, 0, 0, 1097, 242, 1, 0, 0, 0, 1098, 1099, 7, 3, 0, 0, 1099, 1100, 7, 10, 0, 0, 1100, 1101, 7, 21, 0, 0, 1101, 1102, 7, 11, 0, 0, 1102, 1103, 7, 0, 0, 0, 1103, 1104, 7, 5, 0, 0, 1104, 1105, 7, 10, 0, 0, 1105, 244, 1, 0, 0, 0, 1106, 1107, 7, 3, 0, 0, 1107, 1108, 7, 10, 0, 0, 1108, 1109, 7, 14, 0, 0, 1109, 1110, 7, 4, 0, 0, 1110, 1111, 7, 3, 0, 0, 1111, 1112, 7, 6, 0, 0, 1112, 1113, 7, 5, 0, 0, 1113, 1114, 7, 4, 0, 0, 1114, 246, 1, 0, 0, 0, 1115, 1116, 7, 3, 0, 0, 1116, 1117, 7, 10, 0, 0, 1117, 1118, 7, 4, 0, 0, 1118, 1119, 7, 16, 0, 0, 1119, 1120, 7, 3, 0, 0, 1120, 1121, 7, 7, 0, 0, 1121, 1122, 7, 6, 0, 0, 1122, 1123, 7, 7, 0, 0, 1123, 1124, 7, 18, 0, 0, 1124, 248, 1, 0, 0, 0, 1125, 1126, 7, 3, 0, 0, 1126, 1127, 7, 6, 0, 0, 1127, 1128, 7, 18, 0, 0, 1128, 1129, 7, 15, 0, 0, 1129, 1130, 7, 4, 0, 0, 1130, 250, 1, 0, 0, 0, 1131, 1132, 7, 3, 0, 0, 1132, 1133, 7, 2, 0, 0, 1133, 1134, 7, 11, 0, 0, 1134, 1135, 7, 11, 0, 0, 1135, 1136, 7, 1, 0, 0, 1136, 1137, 7, 0, 0, 0, 1137, 1138, 7, 5, 0, 0, 1138, 1139, 7, 20, 
0, 0, 1139, 252, 1, 0, 0, 0, 1140, 1141, 7, 3, 0, 0, 1141, 1142, 7, 2, 0, 0, 1142, 1143, 7, 19, 0, 0, 1143, 254, 1, 0, 0, 0, 1144, 1145, 7, 3, 0, 0, 1145, 1146, 7, 2, 0, 0, 1146, 1147, 7, 19, 0, 0, 1147, 1148, 7, 14, 0, 0, 1148, 256, 1, 0, 0, 0, 1149, 1150, 7, 14, 0, 0, 1150, 1151, 7, 0, 0, 0, 1151, 1152, 7, 23, 0, 0, 1152, 1153, 7, 10, 0, 0, 1153, 1154, 7, 21, 0, 0, 1154, 1155, 7, 2, 0, 0, 1155, 1156, 7, 6, 0, 0, 1156, 1157, 7, 7, 0, 0, 1157, 1158, 7, 4, 0, 0, 1158, 258, 1, 0, 0, 0, 1159, 1160, 7, 14, 0, 0, 1160, 1161, 7, 10, 0, 0, 1161, 1162, 7, 11, 0, 0, 1162, 1163, 7, 10, 0, 0, 1163, 1164, 7, 5, 0, 0, 1164, 1165, 7, 4, 0, 0, 1165, 260, 1, 0, 0, 0, 1166, 1167, 7, 14, 0, 0, 1167, 1168, 7, 10, 0, 0, 1168, 1169, 7, 4, 0, 0, 1169, 262, 1, 0, 0, 0, 1170, 1171, 7, 4, 0, 0, 1171, 1172, 7, 0, 0, 0, 1172, 1173, 7, 1, 0, 0, 1173, 1174, 7, 11, 0, 0, 1174, 1175, 7, 10, 0, 0, 1175, 264, 1, 0, 0, 0, 1176, 1177, 7, 4, 0, 0, 1177, 1178, 7, 10, 0, 0, 1178, 1179, 7, 17, 0, 0, 1179, 1180, 7, 21, 0, 0, 1180, 266, 1, 0, 0, 0, 1181, 1182, 7, 4, 0, 0, 1182, 1183, 7, 10, 0, 0, 1183, 1184, 7, 17, 0, 0, 1184, 1185, 7, 21, 0, 0, 1185, 1186, 7, 2, 0, 0, 1186, 1187, 7, 3, 0, 0, 1187, 1188, 7, 0, 0, 0, 1188, 1189, 7, 3, 0, 0, 1189, 1190, 7, 12, 0, 0, 1190, 268, 1, 0, 0, 0, 1191, 1192, 7, 4, 0, 0, 1192, 1193, 7, 15, 0, 0, 1193, 1194, 7, 10, 0, 0, 1194, 1195, 7, 7, 0, 0, 1195, 270, 1, 0, 0, 0, 1196, 1197, 7, 4, 0, 0, 1197, 1198, 7, 2, 0, 0, 1198, 272, 1, 0, 0, 0, 1199, 1200, 7, 4, 0, 0, 1200, 1201, 7, 3, 0, 0, 1201, 1202, 7, 0, 0, 0, 1202, 1203, 7, 7, 0, 0, 1203, 1204, 7, 14, 0, 0, 1204, 1205, 7, 0, 0, 0, 1205, 1206, 7, 5, 0, 0, 1206, 1207, 7, 4, 0, 0, 1207, 1208, 7, 6, 0, 0, 1208, 1209, 7, 2, 0, 0, 1209, 1210, 7, 7, 0, 0, 1210, 274, 1, 0, 0, 0, 1211, 1212, 7, 4, 0, 0, 1212, 1213, 7, 3, 0, 0, 1213, 1214, 7, 6, 0, 0, 1214, 1215, 7, 18, 0, 0, 1215, 1216, 7, 18, 0, 0, 1216, 1217, 7, 10, 0, 0, 1217, 1218, 7, 3, 0, 0, 1218, 276, 1, 0, 0, 0, 1219, 1220, 7, 16, 0, 0, 1220, 1221, 7, 7, 0, 0, 1221, 1222, 7, 6, 0, 0, 1222, 1223, 7, 2, 0, 0, 1223, 1224, 7, 7, 0, 0, 1224, 278, 1, 0, 0, 0, 1225, 1226, 7, 16, 0, 0, 1226, 1227, 7, 7, 0, 0, 1227, 1228, 7, 6, 0, 0, 1228, 1229, 7, 25, 0, 0, 1229, 1230, 7, 16, 0, 0, 1230, 1231, 7, 10, 0, 0, 1231, 280, 1, 0, 0, 0, 1232, 1233, 7, 16, 0, 0, 1233, 1234, 7, 21, 0, 0, 1234, 1235, 7, 8, 0, 0, 1235, 1236, 7, 0, 0, 0, 1236, 1237, 7, 4, 0, 0, 1237, 1238, 7, 10, 0, 0, 1238, 282, 1, 0, 0, 0, 1239, 1240, 7, 16, 0, 0, 1240, 1241, 7, 14, 0, 0, 1241, 1242, 7, 6, 0, 0, 1242, 1243, 7, 7, 0, 0, 1243, 1244, 7, 18, 0, 0, 1244, 284, 1, 0, 0, 0, 1245, 1246, 7, 23, 0, 0, 1246, 1247, 7, 0, 0, 0, 1247, 1248, 7, 5, 0, 0, 1248, 1249, 7, 16, 0, 0, 1249, 1250, 7, 16, 0, 0, 1250, 1251, 7, 17, 0, 0, 1251, 286, 1, 0, 0, 0, 1252, 1253, 7, 23, 0, 0, 1253, 1254, 7, 0, 0, 0, 1254, 1255, 7, 11, 0, 0, 1255, 1256, 7, 16, 0, 0, 1256, 1257, 7, 10, 0, 0, 1257, 1258, 7, 14, 0, 0, 1258, 288, 1, 0, 0, 0, 1259, 1260, 7, 23, 0, 0, 1260, 1261, 7, 6, 0, 0, 1261, 1262, 7, 10, 0, 0, 1262, 1263, 7, 19, 0, 0, 1263, 290, 1, 0, 0, 0, 1264, 1265, 7, 23, 0, 0, 1265, 1266, 7, 6, 0, 0, 1266, 1267, 7, 3, 0, 0, 1267, 1268, 7, 4, 0, 0, 1268, 1269, 7, 16, 0, 0, 1269, 1270, 7, 0, 0, 0, 1270, 1271, 7, 11, 0, 0, 1271, 292, 1, 0, 0, 0, 1272, 1273, 7, 19, 0, 0, 1273, 1274, 7, 15, 0, 0, 1274, 1275, 7, 10, 0, 0, 1275, 1276, 7, 7, 0, 0, 1276, 294, 1, 0, 0, 0, 1277, 1278, 7, 19, 0, 0, 1278, 1279, 7, 15, 0, 0, 1279, 1280, 7, 10, 0, 0, 1280, 1281, 7, 3, 0, 0, 1281, 1282, 7, 10, 0, 0, 1282, 296, 1, 0, 0, 0, 1283, 1284, 7, 19, 0, 0, 1284, 1285, 7, 6, 0, 0, 1285, 
1286, 7, 4, 0, 0, 1286, 1287, 7, 15, 0, 0, 1287, 298, 1, 0, 0, 0, 1288, 1289, 7, 19, 0, 0, 1289, 1290, 7, 6, 0, 0, 1290, 1291, 7, 4, 0, 0, 1291, 1292, 7, 15, 0, 0, 1292, 1293, 7, 2, 0, 0, 1293, 1294, 7, 16, 0, 0, 1294, 1295, 7, 4, 0, 0, 1295, 300, 1, 0, 0, 0, 1296, 1297, 7, 9, 0, 0, 1297, 1298, 7, 6, 0, 0, 1298, 1299, 7, 3, 0, 0, 1299, 1300, 7, 14, 0, 0, 1300, 1301, 7, 4, 0, 0, 1301, 1302, 5, 95, 0, 0, 1302, 1303, 7, 23, 0, 0, 1303, 1304, 7, 0, 0, 0, 1304, 1305, 7, 11, 0, 0, 1305, 1306, 7, 16, 0, 0, 1306, 1307, 7, 10, 0, 0, 1307, 302, 1, 0, 0, 0, 1308, 1309, 7, 2, 0, 0, 1309, 1310, 7, 23, 0, 0, 1310, 1311, 7, 10, 0, 0, 1311, 1312, 7, 3, 0, 0, 1312, 304, 1, 0, 0, 0, 1313, 1314, 7, 21, 0, 0, 1314, 1315, 7, 0, 0, 0, 1315, 1316, 7, 3, 0, 0, 1316, 1317, 7, 4, 0, 0, 1317, 1318, 7, 6, 0, 0, 1318, 1319, 7, 4, 0, 0, 1319, 1320, 7, 6, 0, 0, 1320, 1321, 7, 2, 0, 0, 1321, 1322, 7, 7, 0, 0, 1322, 306, 1, 0, 0, 0, 1323, 1324, 7, 3, 0, 0, 1324, 1325, 7, 0, 0, 0, 1325, 1326, 7, 7, 0, 0, 1326, 1327, 7, 18, 0, 0, 1327, 1328, 7, 10, 0, 0, 1328, 308, 1, 0, 0, 0, 1329, 1330, 7, 21, 0, 0, 1330, 1331, 7, 3, 0, 0, 1331, 1332, 7, 10, 0, 0, 1332, 1333, 7, 5, 0, 0, 1333, 1334, 7, 10, 0, 0, 1334, 1335, 7, 8, 0, 0, 1335, 1336, 7, 6, 0, 0, 1336, 1337, 7, 7, 0, 0, 1337, 1338, 7, 18, 0, 0, 1338, 310, 1, 0, 0, 0, 1339, 1340, 7, 16, 0, 0, 1340, 1341, 7, 7, 0, 0, 1341, 1342, 7, 1, 0, 0, 1342, 1343, 7, 2, 0, 0, 1343, 1344, 7, 16, 0, 0, 1344, 1345, 7, 7, 0, 0, 1345, 1346, 7, 8, 0, 0, 1346, 1347, 7, 10, 0, 0, 1347, 1348, 7, 8, 0, 0, 1348, 312, 1, 0, 0, 0, 1349, 1350, 7, 5, 0, 0, 1350, 1351, 7, 16, 0, 0, 1351, 1352, 7, 3, 0, 0, 1352, 1353, 7, 3, 0, 0, 1353, 1354, 7, 10, 0, 0, 1354, 1355, 7, 7, 0, 0, 1355, 1356, 7, 4, 0, 0, 1356, 314, 1, 0, 0, 0, 1357, 1358, 7, 9, 0, 0, 1358, 1359, 7, 2, 0, 0, 1359, 1360, 7, 11, 0, 0, 1360, 1361, 7, 11, 0, 0, 1361, 1362, 7, 2, 0, 0, 1362, 1363, 7, 19, 0, 0, 1363, 1364, 7, 6, 0, 0, 1364, 1365, 7, 7, 0, 0, 1365, 1366, 7, 18, 0, 0, 1366, 316, 1, 0, 0, 0, 1367, 1368, 7, 5, 0, 0, 1368, 1369, 7, 16, 0, 0, 1369, 1370, 7, 17, 0, 0, 1370, 1371, 7, 10, 0, 0, 1371, 1372, 5, 95, 0, 0, 1372, 1373, 7, 8, 0, 0, 1373, 1374, 7, 6, 0, 0, 1374, 1375, 7, 14, 0, 0, 1375, 1376, 7, 4, 0, 0, 1376, 318, 1, 0, 0, 0, 1377, 1378, 7, 8, 0, 0, 1378, 1379, 7, 10, 0, 0, 1379, 1380, 7, 7, 0, 0, 1380, 1381, 7, 14, 0, 0, 1381, 1382, 7, 10, 0, 0, 1382, 1383, 5, 95, 0, 0, 1383, 1384, 7, 3, 0, 0, 1384, 1385, 7, 0, 0, 0, 1385, 1386, 7, 7, 0, 0, 1386, 1387, 7, 20, 0, 0, 1387, 320, 1, 0, 0, 0, 1388, 1389, 7, 11, 0, 0, 1389, 1390, 7, 0, 0, 0, 1390, 1391, 7, 18, 0, 0, 1391, 322, 1, 0, 0, 0, 1392, 1393, 7, 11, 0, 0, 1393, 1394, 7, 0, 0, 0, 1394, 1395, 7, 14, 0, 0, 1395, 1396, 7, 4, 0, 0, 1396, 1397, 5, 95, 0, 0, 1397, 1398, 7, 23, 0, 0, 1398, 1399, 7, 0, 0, 0, 1399, 1400, 7, 11, 0, 0, 1400, 1401, 7, 16, 0, 0, 1401, 1402, 7, 10, 0, 0, 1402, 324, 1, 0, 0, 0, 1403, 1404, 7, 11, 0, 0, 1404, 1405, 7, 10, 0, 0, 1405, 1406, 7, 0, 0, 0, 1406, 1407, 7, 8, 0, 0, 1407, 326, 1, 0, 0, 0, 1408, 1409, 7, 7, 0, 0, 1409, 1410, 7, 4, 0, 0, 1410, 1411, 7, 15, 0, 0, 1411, 1412, 5, 95, 0, 0, 1412, 1413, 7, 23, 0, 0, 1413, 1414, 7, 0, 0, 0, 1414, 1415, 7, 11, 0, 0, 1415, 1416, 7, 16, 0, 0, 1416, 1417, 7, 10, 0, 0, 1417, 328, 1, 0, 0, 0, 1418, 1419, 7, 7, 0, 0, 1419, 1420, 7, 4, 0, 0, 1420, 1421, 7, 6, 0, 0, 1421, 1422, 7, 11, 0, 0, 1422, 1423, 7, 10, 0, 0, 1423, 330, 1, 0, 0, 0, 1424, 1425, 7, 21, 0, 0, 1425, 1426, 7, 10, 0, 0, 1426, 1427, 7, 3, 0, 0, 1427, 1428, 7, 5, 0, 0, 1428, 1429, 7, 10, 0, 0, 1429, 1430, 7, 7, 0, 0, 1430, 1431, 7, 4, 0, 0, 1431, 1432, 
5, 95, 0, 0, 1432, 1433, 7, 3, 0, 0, 1433, 1434, 7, 0, 0, 0, 1434, 1435, 7, 7, 0, 0, 1435, 1436, 7, 20, 0, 0, 1436, 332, 1, 0, 0, 0, 1437, 1438, 7, 3, 0, 0, 1438, 1439, 7, 0, 0, 0, 1439, 1440, 7, 7, 0, 0, 1440, 1441, 7, 20, 0, 0, 1441, 334, 1, 0, 0, 0, 1442, 1443, 7, 3, 0, 0, 1443, 1444, 7, 2, 0, 0, 1444, 1445, 7, 19, 0, 0, 1445, 1446, 5, 95, 0, 0, 1446, 1447, 7, 7, 0, 0, 1447, 1448, 7, 16, 0, 0, 1448, 1449, 7, 17, 0, 0, 1449, 1450, 7, 1, 0, 0, 1450, 1451, 7, 10, 0, 0, 1451, 1452, 7, 3, 0, 0, 1452, 336, 1, 0, 0, 0, 1453, 1454, 7, 18, 0, 0, 1454, 1455, 7, 10, 0, 0, 1455, 1456, 7, 7, 0, 0, 1456, 1457, 7, 10, 0, 0, 1457, 1458, 7, 3, 0, 0, 1458, 1459, 7, 0, 0, 0, 1459, 1460, 7, 4, 0, 0, 1460, 1461, 7, 10, 0, 0, 1461, 1462, 7, 8, 0, 0, 1462, 338, 1, 0, 0, 0, 1463, 1464, 7, 0, 0, 0, 1464, 1465, 7, 11, 0, 0, 1465, 1466, 7, 19, 0, 0, 1466, 1467, 7, 0, 0, 0, 1467, 1468, 7, 12, 0, 0, 1468, 1469, 7, 14, 0, 0, 1469, 340, 1, 0, 0, 0, 1470, 1471, 7, 14, 0, 0, 1471, 1472, 7, 4, 0, 0, 1472, 1473, 7, 2, 0, 0, 1473, 1474, 7, 3, 0, 0, 1474, 1475, 7, 10, 0, 0, 1475, 1476, 7, 8, 0, 0, 1476, 342, 1, 0, 0, 0, 1477, 1478, 7, 4, 0, 0, 1478, 1479, 7, 3, 0, 0, 1479, 1480, 7, 16, 0, 0, 1480, 1481, 7, 10, 0, 0, 1481, 344, 1, 0, 0, 0, 1482, 1483, 7, 9, 0, 0, 1483, 1484, 7, 0, 0, 0, 1484, 1485, 7, 11, 0, 0, 1485, 1486, 7, 14, 0, 0, 1486, 1487, 7, 10, 0, 0, 1487, 346, 1, 0, 0, 0, 1488, 1489, 7, 19, 0, 0, 1489, 1490, 7, 6, 0, 0, 1490, 1491, 7, 7, 0, 0, 1491, 1492, 7, 8, 0, 0, 1492, 1493, 7, 2, 0, 0, 1493, 1494, 7, 19, 0, 0, 1494, 348, 1, 0, 0, 0, 1495, 1496, 7, 7, 0, 0, 1496, 1497, 7, 16, 0, 0, 1497, 1498, 7, 11, 0, 0, 1498, 1499, 7, 11, 0, 0, 1499, 1500, 7, 14, 0, 0, 1500, 350, 1, 0, 0, 0, 1501, 1502, 7, 9, 0, 0, 1502, 1503, 7, 6, 0, 0, 1503, 1504, 7, 3, 0, 0, 1504, 1505, 7, 14, 0, 0, 1505, 1506, 7, 4, 0, 0, 1506, 352, 1, 0, 0, 0, 1507, 1508, 7, 11, 0, 0, 1508, 1509, 7, 0, 0, 0, 1509, 1510, 7, 14, 0, 0, 1510, 1511, 7, 4, 0, 0, 1511, 354, 1, 0, 0, 0, 1512, 1513, 7, 9, 0, 0, 1513, 1514, 7, 6, 0, 0, 1514, 1515, 7, 11, 0, 0, 1515, 1516, 7, 4, 0, 0, 1516, 1517, 7, 10, 0, 0, 1517, 1518, 7, 3, 0, 0, 1518, 356, 1, 0, 0, 0, 1519, 1520, 7, 18, 0, 0, 1520, 1521, 7, 3, 0, 0, 1521, 1522, 7, 2, 0, 0, 1522, 1523, 7, 16, 0, 0, 1523, 1524, 7, 21, 0, 0, 1524, 1525, 7, 14, 0, 0, 1525, 358, 1, 0, 0, 0, 1526, 1527, 7, 10, 0, 0, 1527, 1528, 7, 22, 0, 0, 1528, 1529, 7, 5, 0, 0, 1529, 1530, 7, 11, 0, 0, 1530, 1531, 7, 16, 0, 0, 1531, 1532, 7, 8, 0, 0, 1532, 1533, 7, 10, 0, 0, 1533, 360, 1, 0, 0, 0, 1534, 1535, 7, 4, 0, 0, 1535, 1536, 7, 6, 0, 0, 1536, 1537, 7, 10, 0, 0, 1537, 1538, 7, 14, 0, 0, 1538, 362, 1, 0, 0, 0, 1539, 1540, 7, 2, 0, 0, 1540, 1541, 7, 4, 0, 0, 1541, 1542, 7, 15, 0, 0, 1542, 1543, 7, 10, 0, 0, 1543, 1544, 7, 3, 0, 0, 1544, 1545, 7, 14, 0, 0, 1545, 364, 1, 0, 0, 0, 1546, 1547, 7, 8, 0, 0, 1547, 1548, 7, 2, 0, 0, 1548, 366, 1, 0, 0, 0, 1549, 1550, 7, 7, 0, 0, 1550, 1551, 7, 2, 0, 0, 1551, 1552, 7, 4, 0, 0, 1552, 1553, 7, 15, 0, 0, 1553, 1554, 7, 6, 0, 0, 1554, 1555, 7, 7, 0, 0, 1555, 1556, 7, 18, 0, 0, 1556, 368, 1, 0, 0, 0, 1557, 1563, 5, 34, 0, 0, 1558, 1562, 8, 26, 0, 0, 1559, 1560, 5, 34, 0, 0, 1560, 1562, 5, 34, 0, 0, 1561, 1558, 1, 0, 0, 0, 1561, 1559, 1, 0, 0, 0, 1562, 1565, 1, 0, 0, 0, 1563, 1561, 1, 0, 0, 0, 1563, 1564, 1, 0, 0, 0, 1564, 1566, 1, 0, 0, 0, 1565, 1563, 1, 0, 0, 0, 1566, 1593, 5, 34, 0, 0, 1567, 1573, 5, 96, 0, 0, 1568, 1572, 8, 27, 0, 0, 1569, 1570, 5, 96, 0, 0, 1570, 1572, 5, 96, 0, 0, 1571, 1568, 1, 0, 0, 0, 1571, 1569, 1, 0, 0, 0, 1572, 1575, 1, 0, 0, 0, 1573, 1571, 1, 0, 0, 0, 1573, 1574, 1, 0, 0, 
0, 1574, 1576, 1, 0, 0, 0, 1575, 1573, 1, 0, 0, 0, 1576, 1593, 5, 96, 0, 0, 1577, 1581, 5, 91, 0, 0, 1578, 1580, 8, 28, 0, 0, 1579, 1578, 1, 0, 0, 0, 1580, 1583, 1, 0, 0, 0, 1581, 1579, 1, 0, 0, 0, 1581, 1582, 1, 0, 0, 0, 1582, 1584, 1, 0, 0, 0, 1583, 1581, 1, 0, 0, 0, 1584, 1593, 5, 93, 0, 0, 1585, 1589, 7, 29, 0, 0, 1586, 1588, 7, 30, 0, 0, 1587, 1586, 1, 0, 0, 0, 1588, 1591, 1, 0, 0, 0, 1589, 1587, 1, 0, 0, 0, 1589, 1590, 1, 0, 0, 0, 1590, 1593, 1, 0, 0, 0, 1591, 1589, 1, 0, 0, 0, 1592, 1557, 1, 0, 0, 0, 1592, 1567, 1, 0, 0, 0, 1592, 1577, 1, 0, 0, 0, 1592, 1585, 1, 0, 0, 0, 1593, 370, 1, 0, 0, 0, 1594, 1596, 3, 389, 194, 0, 1595, 1594, 1, 0, 0, 0, 1596, 1597, 1, 0, 0, 0, 1597, 1595, 1, 0, 0, 0, 1597, 1598, 1, 0, 0, 0, 1598, 1606, 1, 0, 0, 0, 1599, 1603, 5, 46, 0, 0, 1600, 1602, 3, 389, 194, 0, 1601, 1600, 1, 0, 0, 0, 1602, 1605, 1, 0, 0, 0, 1603, 1601, 1, 0, 0, 0, 1603, 1604, 1, 0, 0, 0, 1604, 1607, 1, 0, 0, 0, 1605, 1603, 1, 0, 0, 0, 1606, 1599, 1, 0, 0, 0, 1606, 1607, 1, 0, 0, 0, 1607, 1615, 1, 0, 0, 0, 1608, 1610, 5, 46, 0, 0, 1609, 1611, 3, 389, 194, 0, 1610, 1609, 1, 0, 0, 0, 1611, 1612, 1, 0, 0, 0, 1612, 1610, 1, 0, 0, 0, 1612, 1613, 1, 0, 0, 0, 1613, 1615, 1, 0, 0, 0, 1614, 1595, 1, 0, 0, 0, 1614, 1608, 1, 0, 0, 0, 1615, 1625, 1, 0, 0, 0, 1616, 1618, 7, 10, 0, 0, 1617, 1619, 7, 31, 0, 0, 1618, 1617, 1, 0, 0, 0, 1618, 1619, 1, 0, 0, 0, 1619, 1621, 1, 0, 0, 0, 1620, 1622, 3, 389, 194, 0, 1621, 1620, 1, 0, 0, 0, 1622, 1623, 1, 0, 0, 0, 1623, 1621, 1, 0, 0, 0, 1623, 1624, 1, 0, 0, 0, 1624, 1626, 1, 0, 0, 0, 1625, 1616, 1, 0, 0, 0, 1625, 1626, 1, 0, 0, 0, 1626, 1636, 1, 0, 0, 0, 1627, 1628, 5, 48, 0, 0, 1628, 1629, 7, 22, 0, 0, 1629, 1631, 1, 0, 0, 0, 1630, 1632, 3, 387, 193, 0, 1631, 1630, 1, 0, 0, 0, 1632, 1633, 1, 0, 0, 0, 1633, 1631, 1, 0, 0, 0, 1633, 1634, 1, 0, 0, 0, 1634, 1636, 1, 0, 0, 0, 1635, 1614, 1, 0, 0, 0, 1635, 1627, 1, 0, 0, 0, 1636, 372, 1, 0, 0, 0, 1637, 1641, 5, 63, 0, 0, 1638, 1640, 3, 389, 194, 0, 1639, 1638, 1, 0, 0, 0, 1640, 1643, 1, 0, 0, 0, 1641, 1639, 1, 0, 0, 0, 1641, 1642, 1, 0, 0, 0, 1642, 1647, 1, 0, 0, 0, 1643, 1641, 1, 0, 0, 0, 1644, 1645, 7, 32, 0, 0, 1645, 1647, 3, 369, 184, 0, 1646, 1637, 1, 0, 0, 0, 1646, 1644, 1, 0, 0, 0, 1647, 374, 1, 0, 0, 0, 1648, 1654, 5, 39, 0, 0, 1649, 1653, 8, 33, 0, 0, 1650, 1651, 5, 39, 0, 0, 1651, 1653, 5, 39, 0, 0, 1652, 1649, 1, 0, 0, 0, 1652, 1650, 1, 0, 0, 0, 1653, 1656, 1, 0, 0, 0, 1654, 1652, 1, 0, 0, 0, 1654, 1655, 1, 0, 0, 0, 1655, 1657, 1, 0, 0, 0, 1656, 1654, 1, 0, 0, 0, 1657, 1658, 5, 39, 0, 0, 1658, 376, 1, 0, 0, 0, 1659, 1660, 7, 22, 0, 0, 1660, 1661, 3, 375, 187, 0, 1661, 378, 1, 0, 0, 0, 1662, 1663, 5, 45, 0, 0, 1663, 1664, 5, 45, 0, 0, 1664, 1668, 1, 0, 0, 0, 1665, 1667, 8, 34, 0, 0, 1666, 1665, 1, 0, 0, 0, 1667, 1670, 1, 0, 0, 0, 1668, 1666, 1, 0, 0, 0, 1668, 1669, 1, 0, 0, 0, 1669, 1676, 1, 0, 0, 0, 1670, 1668, 1, 0, 0, 0, 1671, 1673, 5, 13, 0, 0, 1672, 1671, 1, 0, 0, 0, 1672, 1673, 1, 0, 0, 0, 1673, 1674, 1, 0, 0, 0, 1674, 1677, 5, 10, 0, 0, 1675, 1677, 5, 0, 0, 1, 1676, 1672, 1, 0, 0, 0, 1676, 1675, 1, 0, 0, 0, 1677, 1678, 1, 0, 0, 0, 1678, 1679, 6, 189, 0, 0, 1679, 380, 1, 0, 0, 0, 1680, 1681, 5, 47, 0, 0, 1681, 1682, 5, 42, 0, 0, 1682, 1686, 1, 0, 0, 0, 1683, 1685, 9, 0, 0, 0, 1684, 1683, 1, 0, 0, 0, 1685, 1688, 1, 0, 0, 0, 1686, 1687, 1, 0, 0, 0, 1686, 1684, 1, 0, 0, 0, 1687, 1689, 1, 0, 0, 0, 1688, 1686, 1, 0, 0, 0, 1689, 1690, 5, 42, 0, 0, 1690, 1691, 5, 47, 0, 0, 1691, 1692, 1, 0, 0, 0, 1692, 1693, 6, 190, 0, 0, 1693, 382, 1, 0, 0, 0, 1694, 1695, 7, 35, 0, 0, 1695, 1696, 1, 0, 0, 0, 1696, 1697, 
6, 191, 0, 0, 1697, 384, 1, 0, 0, 0, 1698, 1699, 9, 0, 0, 0, 1699, 386, 1, 0, 0, 0, 1700, 1701, 7, 36, 0, 0, 1701, 388, 1, 0, 0, 0, 1702, 1703, 7, 37, 0, 0, 1703, 390, 1, 0, 0, 0, 26, 0, 1561, 1563, 1571, 1573, 1581, 1589, 1592, 1597, 1603, 1606, 1612, 1614, 1618, 1623, 1625, 1633, 1635, 1641, 1646, 1652, 1654, 1668, 1672, 1676, 1686, 1, 0, 1, 0] \ No newline at end of file diff --git a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteLexer.tokens b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteLexer.tokens index 26a91841ac..4418b51e12 100644 --- a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteLexer.tokens +++ b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteLexer.tokens @@ -1,377 +1,377 @@ -SCOL=1 -DOT=2 -OPEN_PAR=3 -CLOSE_PAR=4 -COMMA=5 -ASSIGN=6 -STAR=7 -PLUS=8 -MINUS=9 -TILDE=10 -PIPE2=11 -DIV=12 -MOD=13 -LT2=14 -GT2=15 -AMP=16 -PIPE=17 -LT=18 -LT_EQ=19 -GT=20 -GT_EQ=21 -EQ=22 -NOT_EQ1=23 -NOT_EQ2=24 -ABORT_=25 -ACTION_=26 -ADD_=27 -AFTER_=28 -ALL_=29 -ALTER_=30 -ANALYZE_=31 -AND_=32 -AS_=33 -ASC_=34 -ATTACH_=35 -AUTOINCREMENT_=36 -BEFORE_=37 -BEGIN_=38 -BETWEEN_=39 -BY_=40 -CASCADE_=41 -CASE_=42 -CAST_=43 -CHECK_=44 -COLLATE_=45 -COLUMN_=46 -COMMIT_=47 -CONFLICT_=48 -CONSTRAINT_=49 -CREATE_=50 -CROSS_=51 -CURRENT_DATE_=52 -CURRENT_TIME_=53 -CURRENT_TIMESTAMP_=54 -DATABASE_=55 -DEFAULT_=56 -DEFERRABLE_=57 -DEFERRED_=58 -DELETE_=59 -DESC_=60 -DETACH_=61 -DISTINCT_=62 -DROP_=63 -EACH_=64 -ELSE_=65 -END_=66 -ESCAPE_=67 -EXCEPT_=68 -EXCLUSIVE_=69 -EXISTS_=70 -EXPLAIN_=71 -FAIL_=72 -FOR_=73 -FOREIGN_=74 -FROM_=75 -FULL_=76 -GLOB_=77 -GROUP_=78 -HAVING_=79 -IF_=80 -IGNORE_=81 -IMMEDIATE_=82 -IN_=83 -INDEX_=84 -INDEXED_=85 -INITIALLY_=86 -INNER_=87 -INSERT_=88 -INSTEAD_=89 -INTERSECT_=90 -INTO_=91 -IS_=92 -ISNULL_=93 -JOIN_=94 -KEY_=95 -LEFT_=96 -LIKE_=97 -LIMIT_=98 -MATCH_=99 -NATURAL_=100 -NO_=101 -NOT_=102 -NOTNULL_=103 -NULL_=104 -OF_=105 -OFFSET_=106 -ON_=107 -OR_=108 -ORDER_=109 -OUTER_=110 -PLAN_=111 -PRAGMA_=112 -PRIMARY_=113 -QUERY_=114 -RAISE_=115 -RECURSIVE_=116 -REFERENCES_=117 -REGEXP_=118 -REINDEX_=119 -RELEASE_=120 -RENAME_=121 -REPLACE_=122 -RESTRICT_=123 -RETURNING_=124 -RIGHT_=125 -ROLLBACK_=126 -ROW_=127 -ROWS_=128 -SAVEPOINT_=129 -SELECT_=130 -SET_=131 -TABLE_=132 -TEMP_=133 -TEMPORARY_=134 -THEN_=135 -TO_=136 -TRANSACTION_=137 -TRIGGER_=138 -UNION_=139 -UNIQUE_=140 -UPDATE_=141 -USING_=142 -VACUUM_=143 -VALUES_=144 -VIEW_=145 -VIRTUAL_=146 -WHEN_=147 -WHERE_=148 -WITH_=149 -WITHOUT_=150 -FIRST_VALUE_=151 -OVER_=152 -PARTITION_=153 -RANGE_=154 -PRECEDING_=155 -UNBOUNDED_=156 -CURRENT_=157 -FOLLOWING_=158 -CUME_DIST_=159 -DENSE_RANK_=160 -LAG_=161 -LAST_VALUE_=162 -LEAD_=163 -NTH_VALUE_=164 -NTILE_=165 -PERCENT_RANK_=166 -RANK_=167 -ROW_NUMBER_=168 -GENERATED_=169 -ALWAYS_=170 -STORED_=171 -TRUE_=172 -FALSE_=173 -WINDOW_=174 -NULLS_=175 -FIRST_=176 -LAST_=177 -FILTER_=178 -GROUPS_=179 -EXCLUDE_=180 -TIES_=181 -OTHERS_=182 -DO_=183 -NOTHING_=184 -IDENTIFIER=185 -NUMERIC_LITERAL=186 -BIND_PARAMETER=187 -STRING_LITERAL=188 -BLOB_LITERAL=189 -SINGLE_LINE_COMMENT=190 -MULTILINE_COMMENT=191 -SPACES=192 -UNEXPECTED_CHAR=193 -';'=1 -'.'=2 -'('=3 -')'=4 -','=5 -'='=6 -'*'=7 -'+'=8 -'-'=9 -'~'=10 -'||'=11 -'/'=12 -'%'=13 -'<<'=14 -'>>'=15 -'&'=16 -'|'=17 -'<'=18 -'<='=19 -'>'=20 -'>='=21 -'=='=22 -'!='=23 -'<>'=24 -'ABORT'=25 -'ACTION'=26 -'ADD'=27 -'AFTER'=28 -'ALL'=29 -'ALTER'=30 -'ANALYZE'=31 -'AND'=32 -'AS'=33 -'ASC'=34 -'ATTACH'=35 -'AUTOINCREMENT'=36 -'BEFORE'=37 -'BEGIN'=38 -'BETWEEN'=39 -'BY'=40 
-'CASCADE'=41 -'CASE'=42 -'CAST'=43 -'CHECK'=44 -'COLLATE'=45 -'COLUMN'=46 -'COMMIT'=47 -'CONFLICT'=48 -'CONSTRAINT'=49 -'CREATE'=50 -'CROSS'=51 -'CURRENT_DATE'=52 -'CURRENT_TIME'=53 -'CURRENT_TIMESTAMP'=54 -'DATABASE'=55 -'DEFAULT'=56 -'DEFERRABLE'=57 -'DEFERRED'=58 -'DELETE'=59 -'DESC'=60 -'DETACH'=61 -'DISTINCT'=62 -'DROP'=63 -'EACH'=64 -'ELSE'=65 -'END'=66 -'ESCAPE'=67 -'EXCEPT'=68 -'EXCLUSIVE'=69 -'EXISTS'=70 -'EXPLAIN'=71 -'FAIL'=72 -'FOR'=73 -'FOREIGN'=74 -'FROM'=75 -'FULL'=76 -'GLOB'=77 -'GROUP'=78 -'HAVING'=79 -'IF'=80 -'IGNORE'=81 -'IMMEDIATE'=82 -'IN'=83 -'INDEX'=84 -'INDEXED'=85 -'INITIALLY'=86 -'INNER'=87 -'INSERT'=88 -'INSTEAD'=89 -'INTERSECT'=90 -'INTO'=91 -'IS'=92 -'ISNULL'=93 -'JOIN'=94 -'KEY'=95 -'LEFT'=96 -'LIKE'=97 -'LIMIT'=98 -'MATCH'=99 -'NATURAL'=100 -'NO'=101 -'NOT'=102 -'NOTNULL'=103 -'NULL'=104 -'OF'=105 -'OFFSET'=106 -'ON'=107 -'OR'=108 -'ORDER'=109 -'OUTER'=110 -'PLAN'=111 -'PRAGMA'=112 -'PRIMARY'=113 -'QUERY'=114 -'RAISE'=115 -'RECURSIVE'=116 -'REFERENCES'=117 -'REGEXP'=118 -'REINDEX'=119 -'RELEASE'=120 -'RENAME'=121 -'REPLACE'=122 -'RESTRICT'=123 -'RETURNING'=124 -'RIGHT'=125 -'ROLLBACK'=126 -'ROW'=127 -'ROWS'=128 -'SAVEPOINT'=129 -'SELECT'=130 -'SET'=131 -'TABLE'=132 -'TEMP'=133 -'TEMPORARY'=134 -'THEN'=135 -'TO'=136 -'TRANSACTION'=137 -'TRIGGER'=138 -'UNION'=139 -'UNIQUE'=140 -'UPDATE'=141 -'USING'=142 -'VACUUM'=143 -'VALUES'=144 -'VIEW'=145 -'VIRTUAL'=146 -'WHEN'=147 -'WHERE'=148 -'WITH'=149 -'WITHOUT'=150 -'FIRST_VALUE'=151 -'OVER'=152 -'PARTITION'=153 -'RANGE'=154 -'PRECEDING'=155 -'UNBOUNDED'=156 -'CURRENT'=157 -'FOLLOWING'=158 -'CUME_DIST'=159 -'DENSE_RANK'=160 -'LAG'=161 -'LAST_VALUE'=162 -'LEAD'=163 -'NTH_VALUE'=164 -'NTILE'=165 -'PERCENT_RANK'=166 -'RANK'=167 -'ROW_NUMBER'=168 -'GENERATED'=169 -'ALWAYS'=170 -'STORED'=171 -'TRUE'=172 -'FALSE'=173 -'WINDOW'=174 -'NULLS'=175 -'FIRST'=176 -'LAST'=177 -'FILTER'=178 -'GROUPS'=179 -'EXCLUDE'=180 -'TIES'=181 -'OTHERS'=182 -'DO'=183 -'NOTHING'=184 +SCOL=1 +DOT=2 +OPEN_PAR=3 +CLOSE_PAR=4 +COMMA=5 +ASSIGN=6 +STAR=7 +PLUS=8 +MINUS=9 +TILDE=10 +PIPE2=11 +DIV=12 +MOD=13 +LT2=14 +GT2=15 +AMP=16 +PIPE=17 +LT=18 +LT_EQ=19 +GT=20 +GT_EQ=21 +EQ=22 +NOT_EQ1=23 +NOT_EQ2=24 +ABORT_=25 +ACTION_=26 +ADD_=27 +AFTER_=28 +ALL_=29 +ALTER_=30 +ANALYZE_=31 +AND_=32 +AS_=33 +ASC_=34 +ATTACH_=35 +AUTOINCREMENT_=36 +BEFORE_=37 +BEGIN_=38 +BETWEEN_=39 +BY_=40 +CASCADE_=41 +CASE_=42 +CAST_=43 +CHECK_=44 +COLLATE_=45 +COLUMN_=46 +COMMIT_=47 +CONFLICT_=48 +CONSTRAINT_=49 +CREATE_=50 +CROSS_=51 +CURRENT_DATE_=52 +CURRENT_TIME_=53 +CURRENT_TIMESTAMP_=54 +DATABASE_=55 +DEFAULT_=56 +DEFERRABLE_=57 +DEFERRED_=58 +DELETE_=59 +DESC_=60 +DETACH_=61 +DISTINCT_=62 +DROP_=63 +EACH_=64 +ELSE_=65 +END_=66 +ESCAPE_=67 +EXCEPT_=68 +EXCLUSIVE_=69 +EXISTS_=70 +EXPLAIN_=71 +FAIL_=72 +FOR_=73 +FOREIGN_=74 +FROM_=75 +FULL_=76 +GLOB_=77 +GROUP_=78 +HAVING_=79 +IF_=80 +IGNORE_=81 +IMMEDIATE_=82 +IN_=83 +INDEX_=84 +INDEXED_=85 +INITIALLY_=86 +INNER_=87 +INSERT_=88 +INSTEAD_=89 +INTERSECT_=90 +INTO_=91 +IS_=92 +ISNULL_=93 +JOIN_=94 +KEY_=95 +LEFT_=96 +LIKE_=97 +LIMIT_=98 +MATCH_=99 +NATURAL_=100 +NO_=101 +NOT_=102 +NOTNULL_=103 +NULL_=104 +OF_=105 +OFFSET_=106 +ON_=107 +OR_=108 +ORDER_=109 +OUTER_=110 +PLAN_=111 +PRAGMA_=112 +PRIMARY_=113 +QUERY_=114 +RAISE_=115 +RECURSIVE_=116 +REFERENCES_=117 +REGEXP_=118 +REINDEX_=119 +RELEASE_=120 +RENAME_=121 +REPLACE_=122 +RESTRICT_=123 +RETURNING_=124 +RIGHT_=125 +ROLLBACK_=126 +ROW_=127 +ROWS_=128 +SAVEPOINT_=129 +SELECT_=130 +SET_=131 +TABLE_=132 +TEMP_=133 +TEMPORARY_=134 +THEN_=135 +TO_=136 +TRANSACTION_=137 
+TRIGGER_=138 +UNION_=139 +UNIQUE_=140 +UPDATE_=141 +USING_=142 +VACUUM_=143 +VALUES_=144 +VIEW_=145 +VIRTUAL_=146 +WHEN_=147 +WHERE_=148 +WITH_=149 +WITHOUT_=150 +FIRST_VALUE_=151 +OVER_=152 +PARTITION_=153 +RANGE_=154 +PRECEDING_=155 +UNBOUNDED_=156 +CURRENT_=157 +FOLLOWING_=158 +CUME_DIST_=159 +DENSE_RANK_=160 +LAG_=161 +LAST_VALUE_=162 +LEAD_=163 +NTH_VALUE_=164 +NTILE_=165 +PERCENT_RANK_=166 +RANK_=167 +ROW_NUMBER_=168 +GENERATED_=169 +ALWAYS_=170 +STORED_=171 +TRUE_=172 +FALSE_=173 +WINDOW_=174 +NULLS_=175 +FIRST_=176 +LAST_=177 +FILTER_=178 +GROUPS_=179 +EXCLUDE_=180 +TIES_=181 +OTHERS_=182 +DO_=183 +NOTHING_=184 +IDENTIFIER=185 +NUMERIC_LITERAL=186 +BIND_PARAMETER=187 +STRING_LITERAL=188 +BLOB_LITERAL=189 +SINGLE_LINE_COMMENT=190 +MULTILINE_COMMENT=191 +SPACES=192 +UNEXPECTED_CHAR=193 +';'=1 +'.'=2 +'('=3 +')'=4 +','=5 +'='=6 +'*'=7 +'+'=8 +'-'=9 +'~'=10 +'||'=11 +'/'=12 +'%'=13 +'<<'=14 +'>>'=15 +'&'=16 +'|'=17 +'<'=18 +'<='=19 +'>'=20 +'>='=21 +'=='=22 +'!='=23 +'<>'=24 +'ABORT'=25 +'ACTION'=26 +'ADD'=27 +'AFTER'=28 +'ALL'=29 +'ALTER'=30 +'ANALYZE'=31 +'AND'=32 +'AS'=33 +'ASC'=34 +'ATTACH'=35 +'AUTOINCREMENT'=36 +'BEFORE'=37 +'BEGIN'=38 +'BETWEEN'=39 +'BY'=40 +'CASCADE'=41 +'CASE'=42 +'CAST'=43 +'CHECK'=44 +'COLLATE'=45 +'COLUMN'=46 +'COMMIT'=47 +'CONFLICT'=48 +'CONSTRAINT'=49 +'CREATE'=50 +'CROSS'=51 +'CURRENT_DATE'=52 +'CURRENT_TIME'=53 +'CURRENT_TIMESTAMP'=54 +'DATABASE'=55 +'DEFAULT'=56 +'DEFERRABLE'=57 +'DEFERRED'=58 +'DELETE'=59 +'DESC'=60 +'DETACH'=61 +'DISTINCT'=62 +'DROP'=63 +'EACH'=64 +'ELSE'=65 +'END'=66 +'ESCAPE'=67 +'EXCEPT'=68 +'EXCLUSIVE'=69 +'EXISTS'=70 +'EXPLAIN'=71 +'FAIL'=72 +'FOR'=73 +'FOREIGN'=74 +'FROM'=75 +'FULL'=76 +'GLOB'=77 +'GROUP'=78 +'HAVING'=79 +'IF'=80 +'IGNORE'=81 +'IMMEDIATE'=82 +'IN'=83 +'INDEX'=84 +'INDEXED'=85 +'INITIALLY'=86 +'INNER'=87 +'INSERT'=88 +'INSTEAD'=89 +'INTERSECT'=90 +'INTO'=91 +'IS'=92 +'ISNULL'=93 +'JOIN'=94 +'KEY'=95 +'LEFT'=96 +'LIKE'=97 +'LIMIT'=98 +'MATCH'=99 +'NATURAL'=100 +'NO'=101 +'NOT'=102 +'NOTNULL'=103 +'NULL'=104 +'OF'=105 +'OFFSET'=106 +'ON'=107 +'OR'=108 +'ORDER'=109 +'OUTER'=110 +'PLAN'=111 +'PRAGMA'=112 +'PRIMARY'=113 +'QUERY'=114 +'RAISE'=115 +'RECURSIVE'=116 +'REFERENCES'=117 +'REGEXP'=118 +'REINDEX'=119 +'RELEASE'=120 +'RENAME'=121 +'REPLACE'=122 +'RESTRICT'=123 +'RETURNING'=124 +'RIGHT'=125 +'ROLLBACK'=126 +'ROW'=127 +'ROWS'=128 +'SAVEPOINT'=129 +'SELECT'=130 +'SET'=131 +'TABLE'=132 +'TEMP'=133 +'TEMPORARY'=134 +'THEN'=135 +'TO'=136 +'TRANSACTION'=137 +'TRIGGER'=138 +'UNION'=139 +'UNIQUE'=140 +'UPDATE'=141 +'USING'=142 +'VACUUM'=143 +'VALUES'=144 +'VIEW'=145 +'VIRTUAL'=146 +'WHEN'=147 +'WHERE'=148 +'WITH'=149 +'WITHOUT'=150 +'FIRST_VALUE'=151 +'OVER'=152 +'PARTITION'=153 +'RANGE'=154 +'PRECEDING'=155 +'UNBOUNDED'=156 +'CURRENT'=157 +'FOLLOWING'=158 +'CUME_DIST'=159 +'DENSE_RANK'=160 +'LAG'=161 +'LAST_VALUE'=162 +'LEAD'=163 +'NTH_VALUE'=164 +'NTILE'=165 +'PERCENT_RANK'=166 +'RANK'=167 +'ROW_NUMBER'=168 +'GENERATED'=169 +'ALWAYS'=170 +'STORED'=171 +'TRUE'=172 +'FALSE'=173 +'WINDOW'=174 +'NULLS'=175 +'FIRST'=176 +'LAST'=177 +'FILTER'=178 +'GROUPS'=179 +'EXCLUDE'=180 +'TIES'=181 +'OTHERS'=182 +'DO'=183 +'NOTHING'=184 diff --git a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteParser.interp b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteParser.interp index 6bcddfa43e..909ec4031e 100644 --- a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteParser.interp +++ b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteParser.interp @@ -1,510 +1,510 @@ 
-token literal names: -null -';' -'.' -'(' -')' -',' -'=' -'*' -'+' -'-' -'~' -'||' -'/' -'%' -'<<' -'>>' -'&' -'|' -'<' -'<=' -'>' -'>=' -'==' -'!=' -'<>' -'ABORT' -'ACTION' -'ADD' -'AFTER' -'ALL' -'ALTER' -'ANALYZE' -'AND' -'AS' -'ASC' -'ATTACH' -'AUTOINCREMENT' -'BEFORE' -'BEGIN' -'BETWEEN' -'BY' -'CASCADE' -'CASE' -'CAST' -'CHECK' -'COLLATE' -'COLUMN' -'COMMIT' -'CONFLICT' -'CONSTRAINT' -'CREATE' -'CROSS' -'CURRENT_DATE' -'CURRENT_TIME' -'CURRENT_TIMESTAMP' -'DATABASE' -'DEFAULT' -'DEFERRABLE' -'DEFERRED' -'DELETE' -'DESC' -'DETACH' -'DISTINCT' -'DROP' -'EACH' -'ELSE' -'END' -'ESCAPE' -'EXCEPT' -'EXCLUSIVE' -'EXISTS' -'EXPLAIN' -'FAIL' -'FOR' -'FOREIGN' -'FROM' -'FULL' -'GLOB' -'GROUP' -'HAVING' -'IF' -'IGNORE' -'IMMEDIATE' -'IN' -'INDEX' -'INDEXED' -'INITIALLY' -'INNER' -'INSERT' -'INSTEAD' -'INTERSECT' -'INTO' -'IS' -'ISNULL' -'JOIN' -'KEY' -'LEFT' -'LIKE' -'LIMIT' -'MATCH' -'NATURAL' -'NO' -'NOT' -'NOTNULL' -'NULL' -'OF' -'OFFSET' -'ON' -'OR' -'ORDER' -'OUTER' -'PLAN' -'PRAGMA' -'PRIMARY' -'QUERY' -'RAISE' -'RECURSIVE' -'REFERENCES' -'REGEXP' -'REINDEX' -'RELEASE' -'RENAME' -'REPLACE' -'RESTRICT' -'RETURNING' -'RIGHT' -'ROLLBACK' -'ROW' -'ROWS' -'SAVEPOINT' -'SELECT' -'SET' -'TABLE' -'TEMP' -'TEMPORARY' -'THEN' -'TO' -'TRANSACTION' -'TRIGGER' -'UNION' -'UNIQUE' -'UPDATE' -'USING' -'VACUUM' -'VALUES' -'VIEW' -'VIRTUAL' -'WHEN' -'WHERE' -'WITH' -'WITHOUT' -'FIRST_VALUE' -'OVER' -'PARTITION' -'RANGE' -'PRECEDING' -'UNBOUNDED' -'CURRENT' -'FOLLOWING' -'CUME_DIST' -'DENSE_RANK' -'LAG' -'LAST_VALUE' -'LEAD' -'NTH_VALUE' -'NTILE' -'PERCENT_RANK' -'RANK' -'ROW_NUMBER' -'GENERATED' -'ALWAYS' -'STORED' -'TRUE' -'FALSE' -'WINDOW' -'NULLS' -'FIRST' -'LAST' -'FILTER' -'GROUPS' -'EXCLUDE' -'TIES' -'OTHERS' -'DO' -'NOTHING' -null -null -null -null -null -null -null -null -null - -token symbolic names: -null -SCOL -DOT -OPEN_PAR -CLOSE_PAR -COMMA -ASSIGN -STAR -PLUS -MINUS -TILDE -PIPE2 -DIV -MOD -LT2 -GT2 -AMP -PIPE -LT -LT_EQ -GT -GT_EQ -EQ -NOT_EQ1 -NOT_EQ2 -ABORT_ -ACTION_ -ADD_ -AFTER_ -ALL_ -ALTER_ -ANALYZE_ -AND_ -AS_ -ASC_ -ATTACH_ -AUTOINCREMENT_ -BEFORE_ -BEGIN_ -BETWEEN_ -BY_ -CASCADE_ -CASE_ -CAST_ -CHECK_ -COLLATE_ -COLUMN_ -COMMIT_ -CONFLICT_ -CONSTRAINT_ -CREATE_ -CROSS_ -CURRENT_DATE_ -CURRENT_TIME_ -CURRENT_TIMESTAMP_ -DATABASE_ -DEFAULT_ -DEFERRABLE_ -DEFERRED_ -DELETE_ -DESC_ -DETACH_ -DISTINCT_ -DROP_ -EACH_ -ELSE_ -END_ -ESCAPE_ -EXCEPT_ -EXCLUSIVE_ -EXISTS_ -EXPLAIN_ -FAIL_ -FOR_ -FOREIGN_ -FROM_ -FULL_ -GLOB_ -GROUP_ -HAVING_ -IF_ -IGNORE_ -IMMEDIATE_ -IN_ -INDEX_ -INDEXED_ -INITIALLY_ -INNER_ -INSERT_ -INSTEAD_ -INTERSECT_ -INTO_ -IS_ -ISNULL_ -JOIN_ -KEY_ -LEFT_ -LIKE_ -LIMIT_ -MATCH_ -NATURAL_ -NO_ -NOT_ -NOTNULL_ -NULL_ -OF_ -OFFSET_ -ON_ -OR_ -ORDER_ -OUTER_ -PLAN_ -PRAGMA_ -PRIMARY_ -QUERY_ -RAISE_ -RECURSIVE_ -REFERENCES_ -REGEXP_ -REINDEX_ -RELEASE_ -RENAME_ -REPLACE_ -RESTRICT_ -RETURNING_ -RIGHT_ -ROLLBACK_ -ROW_ -ROWS_ -SAVEPOINT_ -SELECT_ -SET_ -TABLE_ -TEMP_ -TEMPORARY_ -THEN_ -TO_ -TRANSACTION_ -TRIGGER_ -UNION_ -UNIQUE_ -UPDATE_ -USING_ -VACUUM_ -VALUES_ -VIEW_ -VIRTUAL_ -WHEN_ -WHERE_ -WITH_ -WITHOUT_ -FIRST_VALUE_ -OVER_ -PARTITION_ -RANGE_ -PRECEDING_ -UNBOUNDED_ -CURRENT_ -FOLLOWING_ -CUME_DIST_ -DENSE_RANK_ -LAG_ -LAST_VALUE_ -LEAD_ -NTH_VALUE_ -NTILE_ -PERCENT_RANK_ -RANK_ -ROW_NUMBER_ -GENERATED_ -ALWAYS_ -STORED_ -TRUE_ -FALSE_ -WINDOW_ -NULLS_ -FIRST_ -LAST_ -FILTER_ -GROUPS_ -EXCLUDE_ -TIES_ -OTHERS_ -DO_ -NOTHING_ -IDENTIFIER -NUMERIC_LITERAL -BIND_PARAMETER -STRING_LITERAL -BLOB_LITERAL -SINGLE_LINE_COMMENT -MULTILINE_COMMENT -SPACES 
-UNEXPECTED_CHAR - -rule names: -parse -sql_stmt_list -sql_stmt -alter_table_stmt -analyze_stmt -attach_stmt -begin_stmt -commit_stmt -rollback_stmt -savepoint_stmt -release_stmt -create_index_stmt -indexed_column -create_table_stmt -column_def -type_name -column_constraint -signed_number -table_constraint -foreign_key_clause -conflict_clause -create_trigger_stmt -create_view_stmt -create_virtual_table_stmt -with_clause -cte_table_name -recursive_cte -common_table_expression -delete_stmt -delete_stmt_limited -detach_stmt -drop_stmt -expr -raise_function -literal_value -value_row -values_clause -insert_stmt -returning_clause -upsert_clause -pragma_stmt -pragma_value -reindex_stmt -select_stmt -join_clause -select_core -factored_select_stmt -simple_select_stmt -compound_select_stmt -table_or_subquery -result_column -join_operator -join_constraint -compound_operator -update_stmt -column_name_list -update_stmt_limited -qualified_table_name -vacuum_stmt -filter_clause -window_defn -over_clause -frame_spec -frame_clause -simple_function_invocation -aggregate_function_invocation -window_function_invocation -common_table_stmt -order_by_stmt -limit_stmt -ordering_term -asc_desc -frame_left -frame_right -frame_single -window_function -offset -default_value -partition_by -order_by_expr -order_by_expr_asc_desc -expr_asc_desc -initial_select -recursive_select -unary_operator -error_message -module_argument -column_alias -keyword -name -function_name -schema_name -table_name -table_or_index_name -column_name -collation_name -foreign_table -index_name -trigger_name -view_name -module_name -pragma_name -savepoint_name -table_alias -transaction_name -window_name -alias -filename -base_window_name -simple_func -aggregate_func -table_function_name -any_name - - -atn: +token literal names: +null +';' +'.' 
+'(' +')' +',' +'=' +'*' +'+' +'-' +'~' +'||' +'/' +'%' +'<<' +'>>' +'&' +'|' +'<' +'<=' +'>' +'>=' +'==' +'!=' +'<>' +'ABORT' +'ACTION' +'ADD' +'AFTER' +'ALL' +'ALTER' +'ANALYZE' +'AND' +'AS' +'ASC' +'ATTACH' +'AUTOINCREMENT' +'BEFORE' +'BEGIN' +'BETWEEN' +'BY' +'CASCADE' +'CASE' +'CAST' +'CHECK' +'COLLATE' +'COLUMN' +'COMMIT' +'CONFLICT' +'CONSTRAINT' +'CREATE' +'CROSS' +'CURRENT_DATE' +'CURRENT_TIME' +'CURRENT_TIMESTAMP' +'DATABASE' +'DEFAULT' +'DEFERRABLE' +'DEFERRED' +'DELETE' +'DESC' +'DETACH' +'DISTINCT' +'DROP' +'EACH' +'ELSE' +'END' +'ESCAPE' +'EXCEPT' +'EXCLUSIVE' +'EXISTS' +'EXPLAIN' +'FAIL' +'FOR' +'FOREIGN' +'FROM' +'FULL' +'GLOB' +'GROUP' +'HAVING' +'IF' +'IGNORE' +'IMMEDIATE' +'IN' +'INDEX' +'INDEXED' +'INITIALLY' +'INNER' +'INSERT' +'INSTEAD' +'INTERSECT' +'INTO' +'IS' +'ISNULL' +'JOIN' +'KEY' +'LEFT' +'LIKE' +'LIMIT' +'MATCH' +'NATURAL' +'NO' +'NOT' +'NOTNULL' +'NULL' +'OF' +'OFFSET' +'ON' +'OR' +'ORDER' +'OUTER' +'PLAN' +'PRAGMA' +'PRIMARY' +'QUERY' +'RAISE' +'RECURSIVE' +'REFERENCES' +'REGEXP' +'REINDEX' +'RELEASE' +'RENAME' +'REPLACE' +'RESTRICT' +'RETURNING' +'RIGHT' +'ROLLBACK' +'ROW' +'ROWS' +'SAVEPOINT' +'SELECT' +'SET' +'TABLE' +'TEMP' +'TEMPORARY' +'THEN' +'TO' +'TRANSACTION' +'TRIGGER' +'UNION' +'UNIQUE' +'UPDATE' +'USING' +'VACUUM' +'VALUES' +'VIEW' +'VIRTUAL' +'WHEN' +'WHERE' +'WITH' +'WITHOUT' +'FIRST_VALUE' +'OVER' +'PARTITION' +'RANGE' +'PRECEDING' +'UNBOUNDED' +'CURRENT' +'FOLLOWING' +'CUME_DIST' +'DENSE_RANK' +'LAG' +'LAST_VALUE' +'LEAD' +'NTH_VALUE' +'NTILE' +'PERCENT_RANK' +'RANK' +'ROW_NUMBER' +'GENERATED' +'ALWAYS' +'STORED' +'TRUE' +'FALSE' +'WINDOW' +'NULLS' +'FIRST' +'LAST' +'FILTER' +'GROUPS' +'EXCLUDE' +'TIES' +'OTHERS' +'DO' +'NOTHING' +null +null +null +null +null +null +null +null +null + +token symbolic names: +null +SCOL +DOT +OPEN_PAR +CLOSE_PAR +COMMA +ASSIGN +STAR +PLUS +MINUS +TILDE +PIPE2 +DIV +MOD +LT2 +GT2 +AMP +PIPE +LT +LT_EQ +GT +GT_EQ +EQ +NOT_EQ1 +NOT_EQ2 +ABORT_ +ACTION_ +ADD_ +AFTER_ +ALL_ +ALTER_ +ANALYZE_ +AND_ +AS_ +ASC_ +ATTACH_ +AUTOINCREMENT_ +BEFORE_ +BEGIN_ +BETWEEN_ +BY_ +CASCADE_ +CASE_ +CAST_ +CHECK_ +COLLATE_ +COLUMN_ +COMMIT_ +CONFLICT_ +CONSTRAINT_ +CREATE_ +CROSS_ +CURRENT_DATE_ +CURRENT_TIME_ +CURRENT_TIMESTAMP_ +DATABASE_ +DEFAULT_ +DEFERRABLE_ +DEFERRED_ +DELETE_ +DESC_ +DETACH_ +DISTINCT_ +DROP_ +EACH_ +ELSE_ +END_ +ESCAPE_ +EXCEPT_ +EXCLUSIVE_ +EXISTS_ +EXPLAIN_ +FAIL_ +FOR_ +FOREIGN_ +FROM_ +FULL_ +GLOB_ +GROUP_ +HAVING_ +IF_ +IGNORE_ +IMMEDIATE_ +IN_ +INDEX_ +INDEXED_ +INITIALLY_ +INNER_ +INSERT_ +INSTEAD_ +INTERSECT_ +INTO_ +IS_ +ISNULL_ +JOIN_ +KEY_ +LEFT_ +LIKE_ +LIMIT_ +MATCH_ +NATURAL_ +NO_ +NOT_ +NOTNULL_ +NULL_ +OF_ +OFFSET_ +ON_ +OR_ +ORDER_ +OUTER_ +PLAN_ +PRAGMA_ +PRIMARY_ +QUERY_ +RAISE_ +RECURSIVE_ +REFERENCES_ +REGEXP_ +REINDEX_ +RELEASE_ +RENAME_ +REPLACE_ +RESTRICT_ +RETURNING_ +RIGHT_ +ROLLBACK_ +ROW_ +ROWS_ +SAVEPOINT_ +SELECT_ +SET_ +TABLE_ +TEMP_ +TEMPORARY_ +THEN_ +TO_ +TRANSACTION_ +TRIGGER_ +UNION_ +UNIQUE_ +UPDATE_ +USING_ +VACUUM_ +VALUES_ +VIEW_ +VIRTUAL_ +WHEN_ +WHERE_ +WITH_ +WITHOUT_ +FIRST_VALUE_ +OVER_ +PARTITION_ +RANGE_ +PRECEDING_ +UNBOUNDED_ +CURRENT_ +FOLLOWING_ +CUME_DIST_ +DENSE_RANK_ +LAG_ +LAST_VALUE_ +LEAD_ +NTH_VALUE_ +NTILE_ +PERCENT_RANK_ +RANK_ +ROW_NUMBER_ +GENERATED_ +ALWAYS_ +STORED_ +TRUE_ +FALSE_ +WINDOW_ +NULLS_ +FIRST_ +LAST_ +FILTER_ +GROUPS_ +EXCLUDE_ +TIES_ +OTHERS_ +DO_ +NOTHING_ +IDENTIFIER +NUMERIC_LITERAL +BIND_PARAMETER +STRING_LITERAL +BLOB_LITERAL +SINGLE_LINE_COMMENT +MULTILINE_COMMENT +SPACES +UNEXPECTED_CHAR + +rule names: +parse +sql_stmt_list 
+sql_stmt +alter_table_stmt +analyze_stmt +attach_stmt +begin_stmt +commit_stmt +rollback_stmt +savepoint_stmt +release_stmt +create_index_stmt +indexed_column +create_table_stmt +column_def +type_name +column_constraint +signed_number +table_constraint +foreign_key_clause +conflict_clause +create_trigger_stmt +create_view_stmt +create_virtual_table_stmt +with_clause +cte_table_name +recursive_cte +common_table_expression +delete_stmt +delete_stmt_limited +detach_stmt +drop_stmt +expr +raise_function +literal_value +value_row +values_clause +insert_stmt +returning_clause +upsert_clause +pragma_stmt +pragma_value +reindex_stmt +select_stmt +join_clause +select_core +factored_select_stmt +simple_select_stmt +compound_select_stmt +table_or_subquery +result_column +join_operator +join_constraint +compound_operator +update_stmt +column_name_list +update_stmt_limited +qualified_table_name +vacuum_stmt +filter_clause +window_defn +over_clause +frame_spec +frame_clause +simple_function_invocation +aggregate_function_invocation +window_function_invocation +common_table_stmt +order_by_stmt +limit_stmt +ordering_term +asc_desc +frame_left +frame_right +frame_single +window_function +offset +default_value +partition_by +order_by_expr +order_by_expr_asc_desc +expr_asc_desc +initial_select +recursive_select +unary_operator +error_message +module_argument +column_alias +keyword +name +function_name +schema_name +table_name +table_or_index_name +column_name +collation_name +foreign_table +index_name +trigger_name +view_name +module_name +pragma_name +savepoint_name +table_alias +transaction_name +window_name +alias +filename +base_window_name +simple_func +aggregate_func +table_function_name +any_name + + +atn: [4, 1, 193, 2056, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 1, 0, 5, 0, 228, 8, 0, 10, 0, 12, 0, 231, 9, 0, 1, 0, 1, 0, 1, 1, 5, 1, 236, 8, 1, 10, 1, 12, 1, 239, 9, 1, 1, 1, 1, 1, 4, 1, 243, 8, 1, 11, 1, 12, 1, 244, 1, 1, 5, 1, 248, 8, 1, 10, 1, 12, 1, 251, 9, 1, 1, 1, 5, 1, 254, 8, 
1, 10, 1, 12, 1, 257, 9, 1, 1, 2, 1, 2, 1, 2, 3, 2, 262, 8, 2, 3, 2, 264, 8, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 290, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 297, 8, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 304, 8, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 310, 8, 3, 1, 3, 1, 3, 3, 3, 314, 8, 3, 1, 3, 1, 3, 1, 3, 3, 3, 319, 8, 3, 1, 3, 3, 3, 322, 8, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 3, 4, 329, 8, 4, 1, 4, 3, 4, 332, 8, 4, 1, 5, 1, 5, 3, 5, 336, 8, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 3, 6, 344, 8, 6, 1, 6, 1, 6, 3, 6, 348, 8, 6, 3, 6, 350, 8, 6, 1, 7, 1, 7, 3, 7, 354, 8, 7, 1, 8, 1, 8, 3, 8, 358, 8, 8, 1, 8, 1, 8, 3, 8, 362, 8, 8, 1, 8, 3, 8, 365, 8, 8, 1, 9, 1, 9, 1, 9, 1, 10, 1, 10, 3, 10, 372, 8, 10, 1, 10, 1, 10, 1, 11, 1, 11, 3, 11, 378, 8, 11, 1, 11, 1, 11, 1, 11, 1, 11, 3, 11, 384, 8, 11, 1, 11, 1, 11, 1, 11, 3, 11, 389, 8, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 398, 8, 11, 10, 11, 12, 11, 401, 9, 11, 1, 11, 1, 11, 1, 11, 3, 11, 406, 8, 11, 1, 12, 1, 12, 3, 12, 410, 8, 12, 1, 12, 1, 12, 3, 12, 414, 8, 12, 1, 12, 3, 12, 417, 8, 12, 1, 13, 1, 13, 3, 13, 421, 8, 13, 1, 13, 1, 13, 1, 13, 1, 13, 3, 13, 427, 8, 13, 1, 13, 1, 13, 1, 13, 3, 13, 432, 8, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 439, 8, 13, 10, 13, 12, 13, 442, 9, 13, 1, 13, 1, 13, 5, 13, 446, 8, 13, 10, 13, 12, 13, 449, 9, 13, 1, 13, 1, 13, 1, 13, 3, 13, 454, 8, 13, 1, 13, 1, 13, 3, 13, 458, 8, 13, 1, 14, 1, 14, 3, 14, 462, 8, 14, 1, 14, 5, 14, 465, 8, 14, 10, 14, 12, 14, 468, 9, 14, 1, 15, 4, 15, 471, 8, 15, 11, 15, 12, 15, 472, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 485, 8, 15, 1, 16, 1, 16, 3, 16, 489, 8, 16, 1, 16, 1, 16, 1, 16, 3, 16, 494, 8, 16, 1, 16, 3, 16, 497, 8, 16, 1, 16, 3, 16, 500, 8, 16, 1, 16, 3, 16, 503, 8, 16, 1, 16, 1, 16, 3, 16, 507, 8, 16, 1, 16, 3, 16, 510, 8, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 3, 16, 524, 8, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 3, 16, 531, 8, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 3, 16, 538, 8, 16, 3, 16, 540, 8, 16, 1, 17, 3, 17, 543, 8, 17, 1, 17, 1, 17, 1, 18, 1, 18, 3, 18, 549, 8, 18, 1, 18, 1, 18, 1, 18, 3, 18, 554, 8, 18, 1, 18, 1, 18, 1, 18, 1, 18, 5, 18, 560, 8, 18, 10, 18, 12, 18, 563, 9, 18, 1, 18, 1, 18, 3, 18, 567, 8, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 5, 18, 580, 8, 18, 10, 18, 12, 18, 583, 9, 18, 1, 18, 1, 18, 1, 18, 3, 18, 588, 8, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 5, 19, 596, 8, 19, 10, 19, 12, 19, 599, 9, 19, 1, 19, 1, 19, 3, 19, 603, 8, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 613, 8, 19, 1, 19, 1, 19, 5, 19, 617, 8, 19, 10, 19, 12, 19, 620, 9, 19, 1, 19, 3, 19, 623, 8, 19, 1, 19, 1, 19, 1, 19, 3, 19, 628, 8, 19, 3, 19, 630, 8, 19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 3, 21, 638, 8, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 644, 8, 21, 1, 21, 1, 21, 1, 21, 3, 21, 649, 8, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 656, 8, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 5, 21, 665, 8, 21, 10, 21, 12, 21, 668, 9, 21, 3, 21, 670, 8, 21, 3, 21, 672, 8, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 679, 8, 21, 1, 21, 1, 21, 3, 21, 683, 8, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 690, 8, 21, 1, 21, 1, 21, 4, 21, 694, 8, 21, 11, 21, 12, 21, 695, 1, 21, 1, 21, 1, 22, 1, 22, 3, 22, 702, 8, 22, 1, 22, 1, 22, 1, 22, 1, 22, 3, 22, 708, 8, 22, 1, 22, 1, 22, 1, 
22, 3, 22, 713, 8, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 720, 8, 22, 10, 22, 12, 22, 723, 9, 22, 1, 22, 1, 22, 3, 22, 727, 8, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 3, 23, 738, 8, 23, 1, 23, 1, 23, 1, 23, 3, 23, 743, 8, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 5, 23, 752, 8, 23, 10, 23, 12, 23, 755, 9, 23, 1, 23, 1, 23, 3, 23, 759, 8, 23, 1, 24, 1, 24, 3, 24, 763, 8, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 5, 24, 777, 8, 24, 10, 24, 12, 24, 780, 9, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 5, 25, 787, 8, 25, 10, 25, 12, 25, 790, 9, 25, 1, 25, 1, 25, 3, 25, 794, 8, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 3, 26, 802, 8, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 5, 27, 812, 8, 27, 10, 27, 12, 27, 815, 9, 27, 1, 27, 1, 27, 3, 27, 819, 8, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 3, 28, 827, 8, 28, 1, 28, 1, 28, 1, 28, 1, 28, 1, 28, 3, 28, 834, 8, 28, 1, 28, 3, 28, 837, 8, 28, 1, 29, 3, 29, 840, 8, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 3, 29, 847, 8, 29, 1, 29, 3, 29, 850, 8, 29, 1, 29, 3, 29, 853, 8, 29, 1, 29, 3, 29, 856, 8, 29, 1, 30, 1, 30, 3, 30, 860, 8, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 3, 31, 868, 8, 31, 1, 31, 1, 31, 1, 31, 3, 31, 873, 8, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 883, 8, 32, 1, 32, 1, 32, 1, 32, 3, 32, 888, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 897, 8, 32, 1, 32, 1, 32, 1, 32, 5, 32, 902, 8, 32, 10, 32, 12, 32, 905, 9, 32, 1, 32, 3, 32, 908, 8, 32, 1, 32, 1, 32, 3, 32, 912, 8, 32, 1, 32, 3, 32, 915, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 5, 32, 921, 8, 32, 10, 32, 12, 32, 924, 9, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 936, 8, 32, 1, 32, 3, 32, 939, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 947, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 4, 32, 954, 8, 32, 11, 32, 12, 32, 955, 1, 32, 1, 32, 3, 32, 960, 8, 32, 1, 32, 1, 32, 1, 32, 3, 32, 965, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 995, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1007, 8, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1012, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1024, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1030, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1037, 8, 32, 1, 32, 1, 32, 3, 32, 1041, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 5, 32, 1049, 8, 32, 10, 32, 12, 32, 1052, 9, 32, 3, 32, 1054, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1060, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1066, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 5, 32, 1073, 8, 32, 10, 32, 12, 32, 1076, 9, 32, 3, 32, 1078, 8, 32, 1, 32, 1, 32, 3, 32, 1082, 8, 32, 5, 32, 1084, 8, 32, 10, 32, 12, 32, 1087, 9, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 3, 33, 1095, 8, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, 35, 5, 35, 1105, 8, 35, 10, 35, 12, 35, 1108, 9, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 5, 36, 1116, 8, 36, 10, 36, 12, 36, 1119, 9, 36, 1, 37, 3, 37, 1122, 8, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 3, 37, 1129, 8, 37, 1, 37, 1, 37, 1, 37, 1, 37, 3, 37, 1135, 8, 37, 1, 37, 1, 37, 1, 37, 3, 37, 1140, 8, 37, 1, 37, 1, 37, 1, 37, 1, 37, 5, 37, 1146, 8, 37, 10, 37, 12, 37, 1149, 9, 37, 1, 37, 1, 37, 3, 37, 1153, 8, 37, 1, 37, 1, 37, 
3, 37, 1157, 8, 37, 1, 37, 3, 37, 1160, 8, 37, 1, 37, 1, 37, 3, 37, 1164, 8, 37, 1, 37, 3, 37, 1167, 8, 37, 1, 38, 1, 38, 1, 38, 1, 38, 5, 38, 1173, 8, 38, 10, 38, 12, 38, 1176, 9, 38, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 5, 39, 1184, 8, 39, 10, 39, 12, 39, 1187, 9, 39, 1, 39, 1, 39, 1, 39, 3, 39, 1192, 8, 39, 3, 39, 1194, 8, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 3, 39, 1202, 8, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, 39, 3, 39, 1209, 8, 39, 1, 39, 1, 39, 1, 39, 5, 39, 1214, 8, 39, 10, 39, 12, 39, 1217, 9, 39, 1, 39, 1, 39, 3, 39, 1221, 8, 39, 3, 39, 1223, 8, 39, 1, 40, 1, 40, 1, 40, 1, 40, 3, 40, 1229, 8, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, 3, 40, 1238, 8, 40, 1, 41, 1, 41, 1, 41, 3, 41, 1243, 8, 41, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 3, 42, 1250, 8, 42, 1, 42, 1, 42, 3, 42, 1254, 8, 42, 3, 42, 1256, 8, 42, 1, 43, 3, 43, 1259, 8, 43, 1, 43, 1, 43, 1, 43, 1, 43, 5, 43, 1265, 8, 43, 10, 43, 12, 43, 1268, 9, 43, 1, 43, 3, 43, 1271, 8, 43, 1, 43, 3, 43, 1274, 8, 43, 1, 44, 1, 44, 1, 44, 1, 44, 3, 44, 1280, 8, 44, 5, 44, 1282, 8, 44, 10, 44, 12, 44, 1285, 9, 44, 1, 45, 1, 45, 3, 45, 1289, 8, 45, 1, 45, 1, 45, 1, 45, 5, 45, 1294, 8, 45, 10, 45, 12, 45, 1297, 9, 45, 1, 45, 1, 45, 1, 45, 1, 45, 5, 45, 1303, 8, 45, 10, 45, 12, 45, 1306, 9, 45, 1, 45, 3, 45, 1309, 8, 45, 3, 45, 1311, 8, 45, 1, 45, 1, 45, 3, 45, 1315, 8, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 5, 45, 1322, 8, 45, 10, 45, 12, 45, 1325, 9, 45, 1, 45, 1, 45, 3, 45, 1329, 8, 45, 3, 45, 1331, 8, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 5, 45, 1342, 8, 45, 10, 45, 12, 45, 1345, 9, 45, 3, 45, 1347, 8, 45, 1, 45, 3, 45, 1350, 8, 45, 1, 46, 1, 46, 1, 47, 3, 47, 1355, 8, 47, 1, 47, 1, 47, 3, 47, 1359, 8, 47, 1, 47, 3, 47, 1362, 8, 47, 1, 48, 3, 48, 1365, 8, 48, 1, 48, 1, 48, 1, 48, 3, 48, 1370, 8, 48, 1, 48, 1, 48, 3, 48, 1374, 8, 48, 1, 48, 4, 48, 1377, 8, 48, 11, 48, 12, 48, 1378, 1, 48, 3, 48, 1382, 8, 48, 1, 48, 3, 48, 1385, 8, 48, 1, 49, 1, 49, 1, 49, 3, 49, 1390, 8, 49, 1, 49, 1, 49, 3, 49, 1394, 8, 49, 1, 49, 3, 49, 1397, 8, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 3, 49, 1404, 8, 49, 1, 49, 1, 49, 1, 49, 3, 49, 1409, 8, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 5, 49, 1416, 8, 49, 10, 49, 12, 49, 1419, 9, 49, 1, 49, 1, 49, 3, 49, 1423, 8, 49, 1, 49, 3, 49, 1426, 8, 49, 1, 49, 1, 49, 1, 49, 1, 49, 5, 49, 1432, 8, 49, 10, 49, 12, 49, 1435, 9, 49, 1, 49, 3, 49, 1438, 8, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 3, 49, 1446, 8, 49, 1, 49, 3, 49, 1449, 8, 49, 3, 49, 1451, 8, 49, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 3, 50, 1460, 8, 50, 1, 50, 3, 50, 1463, 8, 50, 3, 50, 1465, 8, 50, 1, 51, 1, 51, 3, 51, 1469, 8, 51, 1, 51, 1, 51, 3, 51, 1473, 8, 51, 1, 51, 1, 51, 3, 51, 1477, 8, 51, 1, 51, 3, 51, 1480, 8, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 5, 52, 1489, 8, 52, 10, 52, 12, 52, 1492, 9, 52, 1, 52, 1, 52, 3, 52, 1496, 8, 52, 1, 53, 1, 53, 3, 53, 1500, 8, 53, 1, 53, 1, 53, 3, 53, 1504, 8, 53, 1, 54, 3, 54, 1507, 8, 54, 1, 54, 1, 54, 1, 54, 3, 54, 1512, 8, 54, 1, 54, 1, 54, 1, 54, 1, 54, 3, 54, 1518, 8, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 3, 54, 1525, 8, 54, 1, 54, 1, 54, 1, 54, 5, 54, 1530, 8, 54, 10, 54, 12, 54, 1533, 9, 54, 1, 54, 1, 54, 1, 54, 1, 54, 5, 54, 1539, 8, 54, 10, 54, 12, 54, 1542, 9, 54, 1, 54, 3, 54, 1545, 8, 54, 3, 54, 1547, 8, 54, 1, 54, 1, 54, 3, 54, 1551, 8, 54, 1, 54, 3, 54, 1554, 8, 54, 1, 55, 1, 55, 1, 55, 1, 55, 5, 55, 1560, 8, 55, 10, 55, 12, 55, 1563, 9, 55, 1, 55, 1, 55, 1, 56, 3, 56, 1568, 8, 56, 1, 56, 1, 56, 1, 56, 3, 56, 1573, 8, 56, 1, 
56, 1, 56, 1, 56, 1, 56, 3, 56, 1579, 8, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 3, 56, 1586, 8, 56, 1, 56, 1, 56, 1, 56, 5, 56, 1591, 8, 56, 10, 56, 12, 56, 1594, 9, 56, 1, 56, 1, 56, 3, 56, 1598, 8, 56, 1, 56, 3, 56, 1601, 8, 56, 1, 56, 3, 56, 1604, 8, 56, 1, 56, 3, 56, 1607, 8, 56, 1, 57, 1, 57, 1, 57, 3, 57, 1612, 8, 57, 1, 57, 1, 57, 1, 57, 3, 57, 1617, 8, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 3, 57, 1624, 8, 57, 1, 58, 1, 58, 3, 58, 1628, 8, 58, 1, 58, 1, 58, 3, 58, 1632, 8, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 3, 60, 1642, 8, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 5, 60, 1649, 8, 60, 10, 60, 12, 60, 1652, 9, 60, 3, 60, 1654, 8, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 5, 60, 1661, 8, 60, 10, 60, 12, 60, 1664, 9, 60, 1, 60, 3, 60, 1667, 8, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 3, 61, 1675, 8, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 5, 61, 1682, 8, 61, 10, 61, 12, 61, 1685, 9, 61, 3, 61, 1687, 8, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 5, 61, 1694, 8, 61, 10, 61, 12, 61, 1697, 9, 61, 3, 61, 1699, 8, 61, 1, 61, 3, 61, 1702, 8, 61, 1, 61, 3, 61, 1705, 8, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 3, 62, 1715, 8, 62, 3, 62, 1717, 8, 62, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 3, 63, 1726, 8, 63, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 5, 64, 1733, 8, 64, 10, 64, 12, 64, 1736, 9, 64, 1, 64, 3, 64, 1739, 8, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 3, 65, 1746, 8, 65, 1, 65, 1, 65, 1, 65, 5, 65, 1751, 8, 65, 10, 65, 12, 65, 1754, 9, 65, 1, 65, 3, 65, 1757, 8, 65, 1, 65, 1, 65, 3, 65, 1761, 8, 65, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 5, 66, 1768, 8, 66, 10, 66, 12, 66, 1771, 9, 66, 1, 66, 3, 66, 1774, 8, 66, 1, 66, 1, 66, 3, 66, 1778, 8, 66, 1, 66, 1, 66, 1, 66, 3, 66, 1783, 8, 66, 1, 67, 1, 67, 3, 67, 1787, 8, 67, 1, 67, 1, 67, 1, 67, 5, 67, 1792, 8, 67, 10, 67, 12, 67, 1795, 9, 67, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 5, 68, 1802, 8, 68, 10, 68, 12, 68, 1805, 9, 68, 1, 69, 1, 69, 1, 69, 1, 69, 3, 69, 1811, 8, 69, 1, 70, 1, 70, 1, 70, 3, 70, 1816, 8, 70, 1, 70, 3, 70, 1819, 8, 70, 1, 70, 1, 70, 3, 70, 1823, 8, 70, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 3, 72, 1837, 8, 72, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 3, 73, 1849, 8, 73, 1, 74, 1, 74, 1, 74, 1, 74, 1, 74, 1, 74, 1, 74, 3, 74, 1858, 8, 74, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1867, 8, 75, 1, 75, 1, 75, 3, 75, 1871, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1881, 8, 75, 1, 75, 3, 75, 1884, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1893, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1902, 8, 75, 1, 75, 3, 75, 1905, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1911, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1925, 8, 75, 1, 75, 1, 75, 3, 75, 1929, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1940, 8, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1945, 8, 75, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 1, 77, 1, 78, 1, 78, 1, 78, 4, 78, 1956, 8, 78, 11, 78, 12, 78, 1957, 1, 79, 1, 79, 1, 79, 4, 79, 1963, 8, 79, 11, 79, 12, 79, 1964, 1, 80, 1, 80, 1, 80, 1, 80, 1, 81, 1, 81, 3, 81, 1973, 8, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1978, 8, 81, 5, 81, 1980, 8, 81, 10, 81, 12, 81, 1983, 9, 81, 1, 82, 1, 82, 1, 83, 1, 83, 1, 84, 1, 84, 1, 85, 1, 85, 1, 86, 1, 86, 3, 86, 1995, 8, 86, 1, 87, 1, 87, 1, 88, 1, 88, 1, 89, 1, 89, 1, 90, 1, 90, 1, 91, 1, 91, 1, 92, 1, 92, 1, 93, 1, 93, 1, 94, 1, 
94, 1, 95, 1, 95, 1, 96, 1, 96, 1, 97, 1, 97, 1, 98, 1, 98, 1, 99, 1, 99, 1, 100, 1, 100, 1, 101, 1, 101, 1, 102, 1, 102, 1, 103, 1, 103, 1, 104, 1, 104, 1, 105, 1, 105, 1, 106, 1, 106, 1, 107, 1, 107, 1, 108, 1, 108, 1, 109, 1, 109, 1, 110, 1, 110, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 3, 112, 2054, 8, 112, 1, 112, 2, 440, 472, 1, 64, 113, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 0, 28, 3, 0, 58, 58, 69, 69, 82, 82, 2, 0, 47, 47, 66, 66, 1, 0, 133, 134, 2, 0, 146, 146, 171, 171, 1, 0, 8, 9, 2, 0, 59, 59, 141, 141, 2, 0, 56, 56, 104, 104, 2, 0, 58, 58, 82, 82, 5, 0, 25, 25, 72, 72, 81, 81, 122, 122, 126, 126, 4, 0, 84, 84, 132, 132, 138, 138, 145, 145, 2, 0, 7, 7, 12, 13, 1, 0, 14, 17, 1, 0, 18, 21, 4, 0, 77, 77, 97, 97, 99, 99, 118, 118, 3, 0, 25, 25, 72, 72, 126, 126, 5, 0, 52, 54, 104, 104, 172, 173, 186, 186, 188, 189, 2, 0, 29, 29, 62, 62, 3, 0, 128, 128, 154, 154, 179, 179, 2, 0, 5, 5, 106, 106, 1, 0, 176, 177, 2, 0, 34, 34, 60, 60, 2, 0, 151, 151, 162, 162, 2, 0, 159, 159, 166, 166, 2, 0, 160, 160, 167, 168, 2, 0, 161, 161, 163, 163, 2, 0, 8, 10, 102, 102, 2, 0, 185, 185, 188, 188, 2, 0, 25, 123, 125, 180, 2338, 0, 229, 1, 0, 0, 0, 2, 237, 1, 0, 0, 0, 4, 263, 1, 0, 0, 0, 6, 291, 1, 0, 0, 0, 8, 323, 1, 0, 0, 0, 10, 333, 1, 0, 0, 0, 12, 341, 1, 0, 0, 0, 14, 351, 1, 0, 0, 0, 16, 355, 1, 0, 0, 0, 18, 366, 1, 0, 0, 0, 20, 369, 1, 0, 0, 0, 22, 375, 1, 0, 0, 0, 24, 409, 1, 0, 0, 0, 26, 418, 1, 0, 0, 0, 28, 459, 1, 0, 0, 0, 30, 470, 1, 0, 0, 0, 32, 488, 1, 0, 0, 0, 34, 542, 1, 0, 0, 0, 36, 548, 1, 0, 0, 0, 38, 589, 1, 0, 0, 0, 40, 631, 1, 0, 0, 0, 42, 635, 1, 0, 0, 0, 44, 699, 1, 0, 0, 0, 46, 731, 1, 0, 0, 0, 48, 760, 1, 0, 0, 0, 50, 781, 1, 0, 0, 0, 52, 795, 1, 0, 0, 0, 54, 806, 1, 0, 0, 0, 56, 826, 1, 0, 0, 0, 58, 839, 1, 0, 0, 0, 60, 857, 1, 0, 0, 0, 62, 863, 1, 0, 0, 0, 64, 964, 1, 0, 0, 0, 66, 1088, 1, 0, 0, 0, 68, 1098, 1, 0, 0, 0, 70, 1100, 1, 0, 0, 0, 72, 1111, 1, 0, 0, 0, 74, 1121, 1, 0, 0, 0, 76, 1168, 1, 0, 0, 0, 78, 1177, 1, 0, 0, 0, 80, 1224, 1, 0, 0, 0, 82, 1242, 1, 0, 0, 0, 84, 1244, 1, 0, 0, 0, 86, 1258, 1, 0, 0, 0, 88, 1275, 1, 0, 0, 0, 90, 1349, 1, 0, 0, 0, 92, 1351, 1, 0, 0, 0, 94, 1354, 1, 0, 0, 0, 96, 1364, 1, 0, 0, 0, 98, 1450, 1, 0, 0, 0, 100, 1464, 1, 0, 0, 0, 102, 1479, 1, 0, 0, 0, 104, 1495, 1, 0, 0, 0, 106, 1503, 1, 0, 0, 0, 108, 1506, 1, 0, 0, 0, 110, 1555, 1, 0, 0, 0, 112, 1567, 1, 0, 0, 0, 114, 1611, 1, 0, 0, 0, 116, 1625, 1, 0, 0, 0, 118, 1633, 1, 0, 0, 0, 120, 1639, 1, 0, 0, 0, 122, 1670, 1, 0, 0, 0, 124, 1706, 1, 0, 0, 0, 126, 1718, 1, 0, 0, 0, 128, 1727, 1, 0, 0, 0, 130, 1742, 1, 0, 0, 0, 132, 1762, 1, 0, 0, 0, 134, 1784, 1, 0, 0, 0, 136, 1796, 1, 0, 0, 0, 138, 1806, 1, 0, 0, 0, 140, 1812, 1, 0, 0, 0, 142, 1824, 1, 0, 0, 0, 144, 1836, 1, 0, 0, 0, 146, 1848, 1, 0, 0, 0, 148, 1857, 1, 0, 0, 0, 150, 1944, 1, 0, 0, 0, 152, 1946, 1, 0, 0, 0, 154, 1949, 1, 0, 0, 0, 156, 1952, 1, 0, 0, 0, 158, 1959, 1, 0, 0, 0, 160, 1966, 1, 0, 0, 0, 162, 1970, 1, 0, 0, 0, 164, 1984, 1, 0, 0, 0, 166, 1986, 1, 0, 0, 0, 168, 1988, 1, 0, 0, 0, 170, 1990, 1, 0, 0, 0, 172, 1994, 1, 0, 0, 0, 
174, 1996, 1, 0, 0, 0, 176, 1998, 1, 0, 0, 0, 178, 2000, 1, 0, 0, 0, 180, 2002, 1, 0, 0, 0, 182, 2004, 1, 0, 0, 0, 184, 2006, 1, 0, 0, 0, 186, 2008, 1, 0, 0, 0, 188, 2010, 1, 0, 0, 0, 190, 2012, 1, 0, 0, 0, 192, 2014, 1, 0, 0, 0, 194, 2016, 1, 0, 0, 0, 196, 2018, 1, 0, 0, 0, 198, 2020, 1, 0, 0, 0, 200, 2022, 1, 0, 0, 0, 202, 2024, 1, 0, 0, 0, 204, 2026, 1, 0, 0, 0, 206, 2028, 1, 0, 0, 0, 208, 2030, 1, 0, 0, 0, 210, 2032, 1, 0, 0, 0, 212, 2034, 1, 0, 0, 0, 214, 2036, 1, 0, 0, 0, 216, 2038, 1, 0, 0, 0, 218, 2040, 1, 0, 0, 0, 220, 2042, 1, 0, 0, 0, 222, 2044, 1, 0, 0, 0, 224, 2053, 1, 0, 0, 0, 226, 228, 3, 2, 1, 0, 227, 226, 1, 0, 0, 0, 228, 231, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 232, 1, 0, 0, 0, 231, 229, 1, 0, 0, 0, 232, 233, 5, 0, 0, 1, 233, 1, 1, 0, 0, 0, 234, 236, 5, 1, 0, 0, 235, 234, 1, 0, 0, 0, 236, 239, 1, 0, 0, 0, 237, 235, 1, 0, 0, 0, 237, 238, 1, 0, 0, 0, 238, 240, 1, 0, 0, 0, 239, 237, 1, 0, 0, 0, 240, 249, 3, 4, 2, 0, 241, 243, 5, 1, 0, 0, 242, 241, 1, 0, 0, 0, 243, 244, 1, 0, 0, 0, 244, 242, 1, 0, 0, 0, 244, 245, 1, 0, 0, 0, 245, 246, 1, 0, 0, 0, 246, 248, 3, 4, 2, 0, 247, 242, 1, 0, 0, 0, 248, 251, 1, 0, 0, 0, 249, 247, 1, 0, 0, 0, 249, 250, 1, 0, 0, 0, 250, 255, 1, 0, 0, 0, 251, 249, 1, 0, 0, 0, 252, 254, 5, 1, 0, 0, 253, 252, 1, 0, 0, 0, 254, 257, 1, 0, 0, 0, 255, 253, 1, 0, 0, 0, 255, 256, 1, 0, 0, 0, 256, 3, 1, 0, 0, 0, 257, 255, 1, 0, 0, 0, 258, 261, 5, 71, 0, 0, 259, 260, 5, 114, 0, 0, 260, 262, 5, 111, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 264, 1, 0, 0, 0, 263, 258, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 289, 1, 0, 0, 0, 265, 290, 3, 6, 3, 0, 266, 290, 3, 8, 4, 0, 267, 290, 3, 10, 5, 0, 268, 290, 3, 12, 6, 0, 269, 290, 3, 14, 7, 0, 270, 290, 3, 22, 11, 0, 271, 290, 3, 26, 13, 0, 272, 290, 3, 42, 21, 0, 273, 290, 3, 44, 22, 0, 274, 290, 3, 46, 23, 0, 275, 290, 3, 56, 28, 0, 276, 290, 3, 58, 29, 0, 277, 290, 3, 60, 30, 0, 278, 290, 3, 62, 31, 0, 279, 290, 3, 74, 37, 0, 280, 290, 3, 80, 40, 0, 281, 290, 3, 84, 42, 0, 282, 290, 3, 20, 10, 0, 283, 290, 3, 16, 8, 0, 284, 290, 3, 18, 9, 0, 285, 290, 3, 86, 43, 0, 286, 290, 3, 108, 54, 0, 287, 290, 3, 112, 56, 0, 288, 290, 3, 116, 58, 0, 289, 265, 1, 0, 0, 0, 289, 266, 1, 0, 0, 0, 289, 267, 1, 0, 0, 0, 289, 268, 1, 0, 0, 0, 289, 269, 1, 0, 0, 0, 289, 270, 1, 0, 0, 0, 289, 271, 1, 0, 0, 0, 289, 272, 1, 0, 0, 0, 289, 273, 1, 0, 0, 0, 289, 274, 1, 0, 0, 0, 289, 275, 1, 0, 0, 0, 289, 276, 1, 0, 0, 0, 289, 277, 1, 0, 0, 0, 289, 278, 1, 0, 0, 0, 289, 279, 1, 0, 0, 0, 289, 280, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 289, 282, 1, 0, 0, 0, 289, 283, 1, 0, 0, 0, 289, 284, 1, 0, 0, 0, 289, 285, 1, 0, 0, 0, 289, 286, 1, 0, 0, 0, 289, 287, 1, 0, 0, 0, 289, 288, 1, 0, 0, 0, 290, 5, 1, 0, 0, 0, 291, 292, 5, 30, 0, 0, 292, 296, 5, 132, 0, 0, 293, 294, 3, 182, 91, 0, 294, 295, 5, 2, 0, 0, 295, 297, 1, 0, 0, 0, 296, 293, 1, 0, 0, 0, 296, 297, 1, 0, 0, 0, 297, 298, 1, 0, 0, 0, 298, 321, 3, 184, 92, 0, 299, 309, 5, 121, 0, 0, 300, 301, 5, 136, 0, 0, 301, 310, 3, 184, 92, 0, 302, 304, 5, 46, 0, 0, 303, 302, 1, 0, 0, 0, 303, 304, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, 3, 188, 94, 0, 306, 307, 5, 136, 0, 0, 307, 308, 3, 188, 94, 0, 308, 310, 1, 0, 0, 0, 309, 300, 1, 0, 0, 0, 309, 303, 1, 0, 0, 0, 310, 322, 1, 0, 0, 0, 311, 313, 5, 27, 0, 0, 312, 314, 5, 46, 0, 0, 313, 312, 1, 0, 0, 0, 313, 314, 1, 0, 0, 0, 314, 315, 1, 0, 0, 0, 315, 322, 3, 28, 14, 0, 316, 318, 5, 63, 0, 0, 317, 319, 5, 46, 0, 0, 318, 317, 1, 0, 0, 0, 318, 319, 1, 0, 0, 0, 319, 320, 1, 0, 0, 0, 320, 322, 3, 188, 94, 0, 321, 299, 1, 0, 0, 0, 
321, 311, 1, 0, 0, 0, 321, 316, 1, 0, 0, 0, 322, 7, 1, 0, 0, 0, 323, 331, 5, 31, 0, 0, 324, 332, 3, 182, 91, 0, 325, 326, 3, 182, 91, 0, 326, 327, 5, 2, 0, 0, 327, 329, 1, 0, 0, 0, 328, 325, 1, 0, 0, 0, 328, 329, 1, 0, 0, 0, 329, 330, 1, 0, 0, 0, 330, 332, 3, 186, 93, 0, 331, 324, 1, 0, 0, 0, 331, 328, 1, 0, 0, 0, 331, 332, 1, 0, 0, 0, 332, 9, 1, 0, 0, 0, 333, 335, 5, 35, 0, 0, 334, 336, 5, 55, 0, 0, 335, 334, 1, 0, 0, 0, 335, 336, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 337, 338, 3, 64, 32, 0, 338, 339, 5, 33, 0, 0, 339, 340, 3, 182, 91, 0, 340, 11, 1, 0, 0, 0, 341, 343, 5, 38, 0, 0, 342, 344, 7, 0, 0, 0, 343, 342, 1, 0, 0, 0, 343, 344, 1, 0, 0, 0, 344, 349, 1, 0, 0, 0, 345, 347, 5, 137, 0, 0, 346, 348, 3, 208, 104, 0, 347, 346, 1, 0, 0, 0, 347, 348, 1, 0, 0, 0, 348, 350, 1, 0, 0, 0, 349, 345, 1, 0, 0, 0, 349, 350, 1, 0, 0, 0, 350, 13, 1, 0, 0, 0, 351, 353, 7, 1, 0, 0, 352, 354, 5, 137, 0, 0, 353, 352, 1, 0, 0, 0, 353, 354, 1, 0, 0, 0, 354, 15, 1, 0, 0, 0, 355, 357, 5, 126, 0, 0, 356, 358, 5, 137, 0, 0, 357, 356, 1, 0, 0, 0, 357, 358, 1, 0, 0, 0, 358, 364, 1, 0, 0, 0, 359, 361, 5, 136, 0, 0, 360, 362, 5, 129, 0, 0, 361, 360, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 363, 1, 0, 0, 0, 363, 365, 3, 204, 102, 0, 364, 359, 1, 0, 0, 0, 364, 365, 1, 0, 0, 0, 365, 17, 1, 0, 0, 0, 366, 367, 5, 129, 0, 0, 367, 368, 3, 204, 102, 0, 368, 19, 1, 0, 0, 0, 369, 371, 5, 120, 0, 0, 370, 372, 5, 129, 0, 0, 371, 370, 1, 0, 0, 0, 371, 372, 1, 0, 0, 0, 372, 373, 1, 0, 0, 0, 373, 374, 3, 204, 102, 0, 374, 21, 1, 0, 0, 0, 375, 377, 5, 50, 0, 0, 376, 378, 5, 140, 0, 0, 377, 376, 1, 0, 0, 0, 377, 378, 1, 0, 0, 0, 378, 379, 1, 0, 0, 0, 379, 383, 5, 84, 0, 0, 380, 381, 5, 80, 0, 0, 381, 382, 5, 102, 0, 0, 382, 384, 5, 70, 0, 0, 383, 380, 1, 0, 0, 0, 383, 384, 1, 0, 0, 0, 384, 388, 1, 0, 0, 0, 385, 386, 3, 182, 91, 0, 386, 387, 5, 2, 0, 0, 387, 389, 1, 0, 0, 0, 388, 385, 1, 0, 0, 0, 388, 389, 1, 0, 0, 0, 389, 390, 1, 0, 0, 0, 390, 391, 3, 194, 97, 0, 391, 392, 5, 107, 0, 0, 392, 393, 3, 184, 92, 0, 393, 394, 5, 3, 0, 0, 394, 399, 3, 24, 12, 0, 395, 396, 5, 5, 0, 0, 396, 398, 3, 24, 12, 0, 397, 395, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 397, 1, 0, 0, 0, 399, 400, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, 405, 5, 4, 0, 0, 403, 404, 5, 148, 0, 0, 404, 406, 3, 64, 32, 0, 405, 403, 1, 0, 0, 0, 405, 406, 1, 0, 0, 0, 406, 23, 1, 0, 0, 0, 407, 410, 3, 188, 94, 0, 408, 410, 3, 64, 32, 0, 409, 407, 1, 0, 0, 0, 409, 408, 1, 0, 0, 0, 410, 413, 1, 0, 0, 0, 411, 412, 5, 45, 0, 0, 412, 414, 3, 190, 95, 0, 413, 411, 1, 0, 0, 0, 413, 414, 1, 0, 0, 0, 414, 416, 1, 0, 0, 0, 415, 417, 3, 142, 71, 0, 416, 415, 1, 0, 0, 0, 416, 417, 1, 0, 0, 0, 417, 25, 1, 0, 0, 0, 418, 420, 5, 50, 0, 0, 419, 421, 7, 2, 0, 0, 420, 419, 1, 0, 0, 0, 420, 421, 1, 0, 0, 0, 421, 422, 1, 0, 0, 0, 422, 426, 5, 132, 0, 0, 423, 424, 5, 80, 0, 0, 424, 425, 5, 102, 0, 0, 425, 427, 5, 70, 0, 0, 426, 423, 1, 0, 0, 0, 426, 427, 1, 0, 0, 0, 427, 431, 1, 0, 0, 0, 428, 429, 3, 182, 91, 0, 429, 430, 5, 2, 0, 0, 430, 432, 1, 0, 0, 0, 431, 428, 1, 0, 0, 0, 431, 432, 1, 0, 0, 0, 432, 433, 1, 0, 0, 0, 433, 457, 3, 184, 92, 0, 434, 435, 5, 3, 0, 0, 435, 440, 3, 28, 14, 0, 436, 437, 5, 5, 0, 0, 437, 439, 3, 28, 14, 0, 438, 436, 1, 0, 0, 0, 439, 442, 1, 0, 0, 0, 440, 441, 1, 0, 0, 0, 440, 438, 1, 0, 0, 0, 441, 447, 1, 0, 0, 0, 442, 440, 1, 0, 0, 0, 443, 444, 5, 5, 0, 0, 444, 446, 3, 36, 18, 0, 445, 443, 1, 0, 0, 0, 446, 449, 1, 0, 0, 0, 447, 445, 1, 0, 0, 0, 447, 448, 1, 0, 0, 0, 448, 450, 1, 0, 0, 0, 449, 447, 1, 0, 0, 0, 450, 453, 5, 4, 0, 0, 451, 452, 5, 150, 0, 0, 
452, 454, 5, 185, 0, 0, 453, 451, 1, 0, 0, 0, 453, 454, 1, 0, 0, 0, 454, 458, 1, 0, 0, 0, 455, 456, 5, 33, 0, 0, 456, 458, 3, 86, 43, 0, 457, 434, 1, 0, 0, 0, 457, 455, 1, 0, 0, 0, 458, 27, 1, 0, 0, 0, 459, 461, 3, 188, 94, 0, 460, 462, 3, 30, 15, 0, 461, 460, 1, 0, 0, 0, 461, 462, 1, 0, 0, 0, 462, 466, 1, 0, 0, 0, 463, 465, 3, 32, 16, 0, 464, 463, 1, 0, 0, 0, 465, 468, 1, 0, 0, 0, 466, 464, 1, 0, 0, 0, 466, 467, 1, 0, 0, 0, 467, 29, 1, 0, 0, 0, 468, 466, 1, 0, 0, 0, 469, 471, 3, 178, 89, 0, 470, 469, 1, 0, 0, 0, 471, 472, 1, 0, 0, 0, 472, 473, 1, 0, 0, 0, 472, 470, 1, 0, 0, 0, 473, 484, 1, 0, 0, 0, 474, 475, 5, 3, 0, 0, 475, 476, 3, 34, 17, 0, 476, 477, 5, 4, 0, 0, 477, 485, 1, 0, 0, 0, 478, 479, 5, 3, 0, 0, 479, 480, 3, 34, 17, 0, 480, 481, 5, 5, 0, 0, 481, 482, 3, 34, 17, 0, 482, 483, 5, 4, 0, 0, 483, 485, 1, 0, 0, 0, 484, 474, 1, 0, 0, 0, 484, 478, 1, 0, 0, 0, 484, 485, 1, 0, 0, 0, 485, 31, 1, 0, 0, 0, 486, 487, 5, 49, 0, 0, 487, 489, 3, 178, 89, 0, 488, 486, 1, 0, 0, 0, 488, 489, 1, 0, 0, 0, 489, 539, 1, 0, 0, 0, 490, 491, 5, 113, 0, 0, 491, 493, 5, 95, 0, 0, 492, 494, 3, 142, 71, 0, 493, 492, 1, 0, 0, 0, 493, 494, 1, 0, 0, 0, 494, 496, 1, 0, 0, 0, 495, 497, 3, 40, 20, 0, 496, 495, 1, 0, 0, 0, 496, 497, 1, 0, 0, 0, 497, 499, 1, 0, 0, 0, 498, 500, 5, 36, 0, 0, 499, 498, 1, 0, 0, 0, 499, 500, 1, 0, 0, 0, 500, 540, 1, 0, 0, 0, 501, 503, 5, 102, 0, 0, 502, 501, 1, 0, 0, 0, 502, 503, 1, 0, 0, 0, 503, 504, 1, 0, 0, 0, 504, 507, 5, 104, 0, 0, 505, 507, 5, 140, 0, 0, 506, 502, 1, 0, 0, 0, 506, 505, 1, 0, 0, 0, 507, 509, 1, 0, 0, 0, 508, 510, 3, 40, 20, 0, 509, 508, 1, 0, 0, 0, 509, 510, 1, 0, 0, 0, 510, 540, 1, 0, 0, 0, 511, 512, 5, 44, 0, 0, 512, 513, 5, 3, 0, 0, 513, 514, 3, 64, 32, 0, 514, 515, 5, 4, 0, 0, 515, 540, 1, 0, 0, 0, 516, 523, 5, 56, 0, 0, 517, 524, 3, 34, 17, 0, 518, 524, 3, 68, 34, 0, 519, 520, 5, 3, 0, 0, 520, 521, 3, 64, 32, 0, 521, 522, 5, 4, 0, 0, 522, 524, 1, 0, 0, 0, 523, 517, 1, 0, 0, 0, 523, 518, 1, 0, 0, 0, 523, 519, 1, 0, 0, 0, 524, 540, 1, 0, 0, 0, 525, 526, 5, 45, 0, 0, 526, 540, 3, 190, 95, 0, 527, 540, 3, 38, 19, 0, 528, 529, 5, 169, 0, 0, 529, 531, 5, 170, 0, 0, 530, 528, 1, 0, 0, 0, 530, 531, 1, 0, 0, 0, 531, 532, 1, 0, 0, 0, 532, 533, 5, 33, 0, 0, 533, 534, 5, 3, 0, 0, 534, 535, 3, 64, 32, 0, 535, 537, 5, 4, 0, 0, 536, 538, 7, 3, 0, 0, 537, 536, 1, 0, 0, 0, 537, 538, 1, 0, 0, 0, 538, 540, 1, 0, 0, 0, 539, 490, 1, 0, 0, 0, 539, 506, 1, 0, 0, 0, 539, 511, 1, 0, 0, 0, 539, 516, 1, 0, 0, 0, 539, 525, 1, 0, 0, 0, 539, 527, 1, 0, 0, 0, 539, 530, 1, 0, 0, 0, 540, 33, 1, 0, 0, 0, 541, 543, 7, 4, 0, 0, 542, 541, 1, 0, 0, 0, 542, 543, 1, 0, 0, 0, 543, 544, 1, 0, 0, 0, 544, 545, 5, 186, 0, 0, 545, 35, 1, 0, 0, 0, 546, 547, 5, 49, 0, 0, 547, 549, 3, 178, 89, 0, 548, 546, 1, 0, 0, 0, 548, 549, 1, 0, 0, 0, 549, 587, 1, 0, 0, 0, 550, 551, 5, 113, 0, 0, 551, 554, 5, 95, 0, 0, 552, 554, 5, 140, 0, 0, 553, 550, 1, 0, 0, 0, 553, 552, 1, 0, 0, 0, 554, 555, 1, 0, 0, 0, 555, 556, 5, 3, 0, 0, 556, 561, 3, 24, 12, 0, 557, 558, 5, 5, 0, 0, 558, 560, 3, 24, 12, 0, 559, 557, 1, 0, 0, 0, 560, 563, 1, 0, 0, 0, 561, 559, 1, 0, 0, 0, 561, 562, 1, 0, 0, 0, 562, 564, 1, 0, 0, 0, 563, 561, 1, 0, 0, 0, 564, 566, 5, 4, 0, 0, 565, 567, 3, 40, 20, 0, 566, 565, 1, 0, 0, 0, 566, 567, 1, 0, 0, 0, 567, 588, 1, 0, 0, 0, 568, 569, 5, 44, 0, 0, 569, 570, 5, 3, 0, 0, 570, 571, 3, 64, 32, 0, 571, 572, 5, 4, 0, 0, 572, 588, 1, 0, 0, 0, 573, 574, 5, 74, 0, 0, 574, 575, 5, 95, 0, 0, 575, 576, 5, 3, 0, 0, 576, 581, 3, 188, 94, 0, 577, 578, 5, 5, 0, 0, 578, 580, 3, 188, 94, 0, 579, 577, 1, 0, 0, 0, 580, 583, 1, 
0, 0, 0, 581, 579, 1, 0, 0, 0, 581, 582, 1, 0, 0, 0, 582, 584, 1, 0, 0, 0, 583, 581, 1, 0, 0, 0, 584, 585, 5, 4, 0, 0, 585, 586, 3, 38, 19, 0, 586, 588, 1, 0, 0, 0, 587, 553, 1, 0, 0, 0, 587, 568, 1, 0, 0, 0, 587, 573, 1, 0, 0, 0, 588, 37, 1, 0, 0, 0, 589, 590, 5, 117, 0, 0, 590, 602, 3, 192, 96, 0, 591, 592, 5, 3, 0, 0, 592, 597, 3, 188, 94, 0, 593, 594, 5, 5, 0, 0, 594, 596, 3, 188, 94, 0, 595, 593, 1, 0, 0, 0, 596, 599, 1, 0, 0, 0, 597, 595, 1, 0, 0, 0, 597, 598, 1, 0, 0, 0, 598, 600, 1, 0, 0, 0, 599, 597, 1, 0, 0, 0, 600, 601, 5, 4, 0, 0, 601, 603, 1, 0, 0, 0, 602, 591, 1, 0, 0, 0, 602, 603, 1, 0, 0, 0, 603, 618, 1, 0, 0, 0, 604, 605, 5, 107, 0, 0, 605, 612, 7, 5, 0, 0, 606, 607, 5, 131, 0, 0, 607, 613, 7, 6, 0, 0, 608, 613, 5, 41, 0, 0, 609, 613, 5, 123, 0, 0, 610, 611, 5, 101, 0, 0, 611, 613, 5, 26, 0, 0, 612, 606, 1, 0, 0, 0, 612, 608, 1, 0, 0, 0, 612, 609, 1, 0, 0, 0, 612, 610, 1, 0, 0, 0, 613, 617, 1, 0, 0, 0, 614, 615, 5, 99, 0, 0, 615, 617, 3, 178, 89, 0, 616, 604, 1, 0, 0, 0, 616, 614, 1, 0, 0, 0, 617, 620, 1, 0, 0, 0, 618, 616, 1, 0, 0, 0, 618, 619, 1, 0, 0, 0, 619, 629, 1, 0, 0, 0, 620, 618, 1, 0, 0, 0, 621, 623, 5, 102, 0, 0, 622, 621, 1, 0, 0, 0, 622, 623, 1, 0, 0, 0, 623, 624, 1, 0, 0, 0, 624, 627, 5, 57, 0, 0, 625, 626, 5, 86, 0, 0, 626, 628, 7, 7, 0, 0, 627, 625, 1, 0, 0, 0, 627, 628, 1, 0, 0, 0, 628, 630, 1, 0, 0, 0, 629, 622, 1, 0, 0, 0, 629, 630, 1, 0, 0, 0, 630, 39, 1, 0, 0, 0, 631, 632, 5, 107, 0, 0, 632, 633, 5, 48, 0, 0, 633, 634, 7, 8, 0, 0, 634, 41, 1, 0, 0, 0, 635, 637, 5, 50, 0, 0, 636, 638, 7, 2, 0, 0, 637, 636, 1, 0, 0, 0, 637, 638, 1, 0, 0, 0, 638, 639, 1, 0, 0, 0, 639, 643, 5, 138, 0, 0, 640, 641, 5, 80, 0, 0, 641, 642, 5, 102, 0, 0, 642, 644, 5, 70, 0, 0, 643, 640, 1, 0, 0, 0, 643, 644, 1, 0, 0, 0, 644, 648, 1, 0, 0, 0, 645, 646, 3, 182, 91, 0, 646, 647, 5, 2, 0, 0, 647, 649, 1, 0, 0, 0, 648, 645, 1, 0, 0, 0, 648, 649, 1, 0, 0, 0, 649, 650, 1, 0, 0, 0, 650, 655, 3, 196, 98, 0, 651, 656, 5, 37, 0, 0, 652, 656, 5, 28, 0, 0, 653, 654, 5, 89, 0, 0, 654, 656, 5, 105, 0, 0, 655, 651, 1, 0, 0, 0, 655, 652, 1, 0, 0, 0, 655, 653, 1, 0, 0, 0, 655, 656, 1, 0, 0, 0, 656, 671, 1, 0, 0, 0, 657, 672, 5, 59, 0, 0, 658, 672, 5, 88, 0, 0, 659, 669, 5, 141, 0, 0, 660, 661, 5, 105, 0, 0, 661, 666, 3, 188, 94, 0, 662, 663, 5, 5, 0, 0, 663, 665, 3, 188, 94, 0, 664, 662, 1, 0, 0, 0, 665, 668, 1, 0, 0, 0, 666, 664, 1, 0, 0, 0, 666, 667, 1, 0, 0, 0, 667, 670, 1, 0, 0, 0, 668, 666, 1, 0, 0, 0, 669, 660, 1, 0, 0, 0, 669, 670, 1, 0, 0, 0, 670, 672, 1, 0, 0, 0, 671, 657, 1, 0, 0, 0, 671, 658, 1, 0, 0, 0, 671, 659, 1, 0, 0, 0, 672, 673, 1, 0, 0, 0, 673, 674, 5, 107, 0, 0, 674, 678, 3, 184, 92, 0, 675, 676, 5, 73, 0, 0, 676, 677, 5, 64, 0, 0, 677, 679, 5, 127, 0, 0, 678, 675, 1, 0, 0, 0, 678, 679, 1, 0, 0, 0, 679, 682, 1, 0, 0, 0, 680, 681, 5, 147, 0, 0, 681, 683, 3, 64, 32, 0, 682, 680, 1, 0, 0, 0, 682, 683, 1, 0, 0, 0, 683, 684, 1, 0, 0, 0, 684, 693, 5, 38, 0, 0, 685, 690, 3, 108, 54, 0, 686, 690, 3, 74, 37, 0, 687, 690, 3, 56, 28, 0, 688, 690, 3, 86, 43, 0, 689, 685, 1, 0, 0, 0, 689, 686, 1, 0, 0, 0, 689, 687, 1, 0, 0, 0, 689, 688, 1, 0, 0, 0, 690, 691, 1, 0, 0, 0, 691, 692, 5, 1, 0, 0, 692, 694, 1, 0, 0, 0, 693, 689, 1, 0, 0, 0, 694, 695, 1, 0, 0, 0, 695, 693, 1, 0, 0, 0, 695, 696, 1, 0, 0, 0, 696, 697, 1, 0, 0, 0, 697, 698, 5, 66, 0, 0, 698, 43, 1, 0, 0, 0, 699, 701, 5, 50, 0, 0, 700, 702, 7, 2, 0, 0, 701, 700, 1, 0, 0, 0, 701, 702, 1, 0, 0, 0, 702, 703, 1, 0, 0, 0, 703, 707, 5, 145, 0, 0, 704, 705, 5, 80, 0, 0, 705, 706, 5, 102, 0, 0, 706, 708, 5, 70, 0, 0, 707, 704, 1, 0, 0, 0, 
707, 708, 1, 0, 0, 0, 708, 712, 1, 0, 0, 0, 709, 710, 3, 182, 91, 0, 710, 711, 5, 2, 0, 0, 711, 713, 1, 0, 0, 0, 712, 709, 1, 0, 0, 0, 712, 713, 1, 0, 0, 0, 713, 714, 1, 0, 0, 0, 714, 726, 3, 198, 99, 0, 715, 716, 5, 3, 0, 0, 716, 721, 3, 188, 94, 0, 717, 718, 5, 5, 0, 0, 718, 720, 3, 188, 94, 0, 719, 717, 1, 0, 0, 0, 720, 723, 1, 0, 0, 0, 721, 719, 1, 0, 0, 0, 721, 722, 1, 0, 0, 0, 722, 724, 1, 0, 0, 0, 723, 721, 1, 0, 0, 0, 724, 725, 5, 4, 0, 0, 725, 727, 1, 0, 0, 0, 726, 715, 1, 0, 0, 0, 726, 727, 1, 0, 0, 0, 727, 728, 1, 0, 0, 0, 728, 729, 5, 33, 0, 0, 729, 730, 3, 86, 43, 0, 730, 45, 1, 0, 0, 0, 731, 732, 5, 50, 0, 0, 732, 733, 5, 146, 0, 0, 733, 737, 5, 132, 0, 0, 734, 735, 5, 80, 0, 0, 735, 736, 5, 102, 0, 0, 736, 738, 5, 70, 0, 0, 737, 734, 1, 0, 0, 0, 737, 738, 1, 0, 0, 0, 738, 742, 1, 0, 0, 0, 739, 740, 3, 182, 91, 0, 740, 741, 5, 2, 0, 0, 741, 743, 1, 0, 0, 0, 742, 739, 1, 0, 0, 0, 742, 743, 1, 0, 0, 0, 743, 744, 1, 0, 0, 0, 744, 745, 3, 184, 92, 0, 745, 746, 5, 142, 0, 0, 746, 758, 3, 200, 100, 0, 747, 748, 5, 3, 0, 0, 748, 753, 3, 172, 86, 0, 749, 750, 5, 5, 0, 0, 750, 752, 3, 172, 86, 0, 751, 749, 1, 0, 0, 0, 752, 755, 1, 0, 0, 0, 753, 751, 1, 0, 0, 0, 753, 754, 1, 0, 0, 0, 754, 756, 1, 0, 0, 0, 755, 753, 1, 0, 0, 0, 756, 757, 5, 4, 0, 0, 757, 759, 1, 0, 0, 0, 758, 747, 1, 0, 0, 0, 758, 759, 1, 0, 0, 0, 759, 47, 1, 0, 0, 0, 760, 762, 5, 149, 0, 0, 761, 763, 5, 116, 0, 0, 762, 761, 1, 0, 0, 0, 762, 763, 1, 0, 0, 0, 763, 764, 1, 0, 0, 0, 764, 765, 3, 50, 25, 0, 765, 766, 5, 33, 0, 0, 766, 767, 5, 3, 0, 0, 767, 768, 3, 86, 43, 0, 768, 778, 5, 4, 0, 0, 769, 770, 5, 5, 0, 0, 770, 771, 3, 50, 25, 0, 771, 772, 5, 33, 0, 0, 772, 773, 5, 3, 0, 0, 773, 774, 3, 86, 43, 0, 774, 775, 5, 4, 0, 0, 775, 777, 1, 0, 0, 0, 776, 769, 1, 0, 0, 0, 777, 780, 1, 0, 0, 0, 778, 776, 1, 0, 0, 0, 778, 779, 1, 0, 0, 0, 779, 49, 1, 0, 0, 0, 780, 778, 1, 0, 0, 0, 781, 793, 3, 184, 92, 0, 782, 783, 5, 3, 0, 0, 783, 788, 3, 188, 94, 0, 784, 785, 5, 5, 0, 0, 785, 787, 3, 188, 94, 0, 786, 784, 1, 0, 0, 0, 787, 790, 1, 0, 0, 0, 788, 786, 1, 0, 0, 0, 788, 789, 1, 0, 0, 0, 789, 791, 1, 0, 0, 0, 790, 788, 1, 0, 0, 0, 791, 792, 5, 4, 0, 0, 792, 794, 1, 0, 0, 0, 793, 782, 1, 0, 0, 0, 793, 794, 1, 0, 0, 0, 794, 51, 1, 0, 0, 0, 795, 796, 3, 50, 25, 0, 796, 797, 5, 33, 0, 0, 797, 798, 5, 3, 0, 0, 798, 799, 3, 164, 82, 0, 799, 801, 5, 139, 0, 0, 800, 802, 5, 29, 0, 0, 801, 800, 1, 0, 0, 0, 801, 802, 1, 0, 0, 0, 802, 803, 1, 0, 0, 0, 803, 804, 3, 166, 83, 0, 804, 805, 5, 4, 0, 0, 805, 53, 1, 0, 0, 0, 806, 818, 3, 184, 92, 0, 807, 808, 5, 3, 0, 0, 808, 813, 3, 188, 94, 0, 809, 810, 5, 5, 0, 0, 810, 812, 3, 188, 94, 0, 811, 809, 1, 0, 0, 0, 812, 815, 1, 0, 0, 0, 813, 811, 1, 0, 0, 0, 813, 814, 1, 0, 0, 0, 814, 816, 1, 0, 0, 0, 815, 813, 1, 0, 0, 0, 816, 817, 5, 4, 0, 0, 817, 819, 1, 0, 0, 0, 818, 807, 1, 0, 0, 0, 818, 819, 1, 0, 0, 0, 819, 820, 1, 0, 0, 0, 820, 821, 5, 33, 0, 0, 821, 822, 5, 3, 0, 0, 822, 823, 3, 86, 43, 0, 823, 824, 5, 4, 0, 0, 824, 55, 1, 0, 0, 0, 825, 827, 3, 48, 24, 0, 826, 825, 1, 0, 0, 0, 826, 827, 1, 0, 0, 0, 827, 828, 1, 0, 0, 0, 828, 829, 5, 59, 0, 0, 829, 830, 5, 75, 0, 0, 830, 833, 3, 114, 57, 0, 831, 832, 5, 148, 0, 0, 832, 834, 3, 64, 32, 0, 833, 831, 1, 0, 0, 0, 833, 834, 1, 0, 0, 0, 834, 836, 1, 0, 0, 0, 835, 837, 3, 76, 38, 0, 836, 835, 1, 0, 0, 0, 836, 837, 1, 0, 0, 0, 837, 57, 1, 0, 0, 0, 838, 840, 3, 48, 24, 0, 839, 838, 1, 0, 0, 0, 839, 840, 1, 0, 0, 0, 840, 841, 1, 0, 0, 0, 841, 842, 5, 59, 0, 0, 842, 843, 5, 75, 0, 0, 843, 846, 3, 114, 57, 0, 844, 845, 5, 148, 0, 0, 845, 847, 3, 64, 
32, 0, 846, 844, 1, 0, 0, 0, 846, 847, 1, 0, 0, 0, 847, 849, 1, 0, 0, 0, 848, 850, 3, 76, 38, 0, 849, 848, 1, 0, 0, 0, 849, 850, 1, 0, 0, 0, 850, 855, 1, 0, 0, 0, 851, 853, 3, 136, 68, 0, 852, 851, 1, 0, 0, 0, 852, 853, 1, 0, 0, 0, 853, 854, 1, 0, 0, 0, 854, 856, 3, 138, 69, 0, 855, 852, 1, 0, 0, 0, 855, 856, 1, 0, 0, 0, 856, 59, 1, 0, 0, 0, 857, 859, 5, 61, 0, 0, 858, 860, 5, 55, 0, 0, 859, 858, 1, 0, 0, 0, 859, 860, 1, 0, 0, 0, 860, 861, 1, 0, 0, 0, 861, 862, 3, 182, 91, 0, 862, 61, 1, 0, 0, 0, 863, 864, 5, 63, 0, 0, 864, 867, 7, 9, 0, 0, 865, 866, 5, 80, 0, 0, 866, 868, 5, 70, 0, 0, 867, 865, 1, 0, 0, 0, 867, 868, 1, 0, 0, 0, 868, 872, 1, 0, 0, 0, 869, 870, 3, 182, 91, 0, 870, 871, 5, 2, 0, 0, 871, 873, 1, 0, 0, 0, 872, 869, 1, 0, 0, 0, 872, 873, 1, 0, 0, 0, 873, 874, 1, 0, 0, 0, 874, 875, 3, 224, 112, 0, 875, 63, 1, 0, 0, 0, 876, 877, 6, 32, -1, 0, 877, 965, 3, 68, 34, 0, 878, 965, 5, 187, 0, 0, 879, 880, 3, 182, 91, 0, 880, 881, 5, 2, 0, 0, 881, 883, 1, 0, 0, 0, 882, 879, 1, 0, 0, 0, 882, 883, 1, 0, 0, 0, 883, 884, 1, 0, 0, 0, 884, 885, 3, 184, 92, 0, 885, 886, 5, 2, 0, 0, 886, 888, 1, 0, 0, 0, 887, 882, 1, 0, 0, 0, 887, 888, 1, 0, 0, 0, 888, 889, 1, 0, 0, 0, 889, 965, 3, 188, 94, 0, 890, 891, 3, 168, 84, 0, 891, 892, 3, 64, 32, 21, 892, 965, 1, 0, 0, 0, 893, 894, 3, 180, 90, 0, 894, 907, 5, 3, 0, 0, 895, 897, 5, 62, 0, 0, 896, 895, 1, 0, 0, 0, 896, 897, 1, 0, 0, 0, 897, 898, 1, 0, 0, 0, 898, 903, 3, 64, 32, 0, 899, 900, 5, 5, 0, 0, 900, 902, 3, 64, 32, 0, 901, 899, 1, 0, 0, 0, 902, 905, 1, 0, 0, 0, 903, 901, 1, 0, 0, 0, 903, 904, 1, 0, 0, 0, 904, 908, 1, 0, 0, 0, 905, 903, 1, 0, 0, 0, 906, 908, 5, 7, 0, 0, 907, 896, 1, 0, 0, 0, 907, 906, 1, 0, 0, 0, 907, 908, 1, 0, 0, 0, 908, 909, 1, 0, 0, 0, 909, 911, 5, 4, 0, 0, 910, 912, 3, 118, 59, 0, 911, 910, 1, 0, 0, 0, 911, 912, 1, 0, 0, 0, 912, 914, 1, 0, 0, 0, 913, 915, 3, 122, 61, 0, 914, 913, 1, 0, 0, 0, 914, 915, 1, 0, 0, 0, 915, 965, 1, 0, 0, 0, 916, 917, 5, 3, 0, 0, 917, 922, 3, 64, 32, 0, 918, 919, 5, 5, 0, 0, 919, 921, 3, 64, 32, 0, 920, 918, 1, 0, 0, 0, 921, 924, 1, 0, 0, 0, 922, 920, 1, 0, 0, 0, 922, 923, 1, 0, 0, 0, 923, 925, 1, 0, 0, 0, 924, 922, 1, 0, 0, 0, 925, 926, 5, 4, 0, 0, 926, 965, 1, 0, 0, 0, 927, 928, 5, 43, 0, 0, 928, 929, 5, 3, 0, 0, 929, 930, 3, 64, 32, 0, 930, 931, 5, 33, 0, 0, 931, 932, 3, 30, 15, 0, 932, 933, 5, 4, 0, 0, 933, 965, 1, 0, 0, 0, 934, 936, 5, 102, 0, 0, 935, 934, 1, 0, 0, 0, 935, 936, 1, 0, 0, 0, 936, 937, 1, 0, 0, 0, 937, 939, 5, 70, 0, 0, 938, 935, 1, 0, 0, 0, 938, 939, 1, 0, 0, 0, 939, 940, 1, 0, 0, 0, 940, 941, 5, 3, 0, 0, 941, 942, 3, 86, 43, 0, 942, 943, 5, 4, 0, 0, 943, 965, 1, 0, 0, 0, 944, 946, 5, 42, 0, 0, 945, 947, 3, 64, 32, 0, 946, 945, 1, 0, 0, 0, 946, 947, 1, 0, 0, 0, 947, 953, 1, 0, 0, 0, 948, 949, 5, 147, 0, 0, 949, 950, 3, 64, 32, 0, 950, 951, 5, 135, 0, 0, 951, 952, 3, 64, 32, 0, 952, 954, 1, 0, 0, 0, 953, 948, 1, 0, 0, 0, 954, 955, 1, 0, 0, 0, 955, 953, 1, 0, 0, 0, 955, 956, 1, 0, 0, 0, 956, 959, 1, 0, 0, 0, 957, 958, 5, 65, 0, 0, 958, 960, 3, 64, 32, 0, 959, 957, 1, 0, 0, 0, 959, 960, 1, 0, 0, 0, 960, 961, 1, 0, 0, 0, 961, 962, 5, 66, 0, 0, 962, 965, 1, 0, 0, 0, 963, 965, 3, 66, 33, 0, 964, 876, 1, 0, 0, 0, 964, 878, 1, 0, 0, 0, 964, 887, 1, 0, 0, 0, 964, 890, 1, 0, 0, 0, 964, 893, 1, 0, 0, 0, 964, 916, 1, 0, 0, 0, 964, 927, 1, 0, 0, 0, 964, 938, 1, 0, 0, 0, 964, 944, 1, 0, 0, 0, 964, 963, 1, 0, 0, 0, 965, 1085, 1, 0, 0, 0, 966, 967, 10, 20, 0, 0, 967, 968, 5, 11, 0, 0, 968, 1084, 3, 64, 32, 21, 969, 970, 10, 19, 0, 0, 970, 971, 7, 10, 0, 0, 971, 1084, 3, 64, 32, 20, 972, 973, 10, 
18, 0, 0, 973, 974, 7, 4, 0, 0, 974, 1084, 3, 64, 32, 19, 975, 976, 10, 17, 0, 0, 976, 977, 7, 11, 0, 0, 977, 1084, 3, 64, 32, 18, 978, 979, 10, 16, 0, 0, 979, 980, 7, 12, 0, 0, 980, 1084, 3, 64, 32, 17, 981, 994, 10, 15, 0, 0, 982, 995, 5, 6, 0, 0, 983, 995, 5, 22, 0, 0, 984, 995, 5, 23, 0, 0, 985, 995, 5, 24, 0, 0, 986, 995, 5, 92, 0, 0, 987, 988, 5, 92, 0, 0, 988, 995, 5, 102, 0, 0, 989, 995, 5, 83, 0, 0, 990, 995, 5, 97, 0, 0, 991, 995, 5, 77, 0, 0, 992, 995, 5, 99, 0, 0, 993, 995, 5, 118, 0, 0, 994, 982, 1, 0, 0, 0, 994, 983, 1, 0, 0, 0, 994, 984, 1, 0, 0, 0, 994, 985, 1, 0, 0, 0, 994, 986, 1, 0, 0, 0, 994, 987, 1, 0, 0, 0, 994, 989, 1, 0, 0, 0, 994, 990, 1, 0, 0, 0, 994, 991, 1, 0, 0, 0, 994, 992, 1, 0, 0, 0, 994, 993, 1, 0, 0, 0, 995, 996, 1, 0, 0, 0, 996, 1084, 3, 64, 32, 16, 997, 998, 10, 14, 0, 0, 998, 999, 5, 32, 0, 0, 999, 1084, 3, 64, 32, 15, 1000, 1001, 10, 13, 0, 0, 1001, 1002, 5, 108, 0, 0, 1002, 1084, 3, 64, 32, 14, 1003, 1004, 10, 6, 0, 0, 1004, 1006, 5, 92, 0, 0, 1005, 1007, 5, 102, 0, 0, 1006, 1005, 1, 0, 0, 0, 1006, 1007, 1, 0, 0, 0, 1007, 1008, 1, 0, 0, 0, 1008, 1084, 3, 64, 32, 7, 1009, 1011, 10, 5, 0, 0, 1010, 1012, 5, 102, 0, 0, 1011, 1010, 1, 0, 0, 0, 1011, 1012, 1, 0, 0, 0, 1012, 1013, 1, 0, 0, 0, 1013, 1014, 5, 39, 0, 0, 1014, 1015, 3, 64, 32, 0, 1015, 1016, 5, 32, 0, 0, 1016, 1017, 3, 64, 32, 6, 1017, 1084, 1, 0, 0, 0, 1018, 1019, 10, 9, 0, 0, 1019, 1020, 5, 45, 0, 0, 1020, 1084, 3, 190, 95, 0, 1021, 1023, 10, 8, 0, 0, 1022, 1024, 5, 102, 0, 0, 1023, 1022, 1, 0, 0, 0, 1023, 1024, 1, 0, 0, 0, 1024, 1025, 1, 0, 0, 0, 1025, 1026, 7, 13, 0, 0, 1026, 1029, 3, 64, 32, 0, 1027, 1028, 5, 67, 0, 0, 1028, 1030, 3, 64, 32, 0, 1029, 1027, 1, 0, 0, 0, 1029, 1030, 1, 0, 0, 0, 1030, 1084, 1, 0, 0, 0, 1031, 1036, 10, 7, 0, 0, 1032, 1037, 5, 93, 0, 0, 1033, 1037, 5, 103, 0, 0, 1034, 1035, 5, 102, 0, 0, 1035, 1037, 5, 104, 0, 0, 1036, 1032, 1, 0, 0, 0, 1036, 1033, 1, 0, 0, 0, 1036, 1034, 1, 0, 0, 0, 1037, 1084, 1, 0, 0, 0, 1038, 1040, 10, 4, 0, 0, 1039, 1041, 5, 102, 0, 0, 1040, 1039, 1, 0, 0, 0, 1040, 1041, 1, 0, 0, 0, 1041, 1042, 1, 0, 0, 0, 1042, 1081, 5, 83, 0, 0, 1043, 1053, 5, 3, 0, 0, 1044, 1054, 3, 86, 43, 0, 1045, 1050, 3, 64, 32, 0, 1046, 1047, 5, 5, 0, 0, 1047, 1049, 3, 64, 32, 0, 1048, 1046, 1, 0, 0, 0, 1049, 1052, 1, 0, 0, 0, 1050, 1048, 1, 0, 0, 0, 1050, 1051, 1, 0, 0, 0, 1051, 1054, 1, 0, 0, 0, 1052, 1050, 1, 0, 0, 0, 1053, 1044, 1, 0, 0, 0, 1053, 1045, 1, 0, 0, 0, 1053, 1054, 1, 0, 0, 0, 1054, 1055, 1, 0, 0, 0, 1055, 1082, 5, 4, 0, 0, 1056, 1057, 3, 182, 91, 0, 1057, 1058, 5, 2, 0, 0, 1058, 1060, 1, 0, 0, 0, 1059, 1056, 1, 0, 0, 0, 1059, 1060, 1, 0, 0, 0, 1060, 1061, 1, 0, 0, 0, 1061, 1082, 3, 184, 92, 0, 1062, 1063, 3, 182, 91, 0, 1063, 1064, 5, 2, 0, 0, 1064, 1066, 1, 0, 0, 0, 1065, 1062, 1, 0, 0, 0, 1065, 1066, 1, 0, 0, 0, 1066, 1067, 1, 0, 0, 0, 1067, 1068, 3, 222, 111, 0, 1068, 1077, 5, 3, 0, 0, 1069, 1074, 3, 64, 32, 0, 1070, 1071, 5, 5, 0, 0, 1071, 1073, 3, 64, 32, 0, 1072, 1070, 1, 0, 0, 0, 1073, 1076, 1, 0, 0, 0, 1074, 1072, 1, 0, 0, 0, 1074, 1075, 1, 0, 0, 0, 1075, 1078, 1, 0, 0, 0, 1076, 1074, 1, 0, 0, 0, 1077, 1069, 1, 0, 0, 0, 1077, 1078, 1, 0, 0, 0, 1078, 1079, 1, 0, 0, 0, 1079, 1080, 5, 4, 0, 0, 1080, 1082, 1, 0, 0, 0, 1081, 1043, 1, 0, 0, 0, 1081, 1059, 1, 0, 0, 0, 1081, 1065, 1, 0, 0, 0, 1082, 1084, 1, 0, 0, 0, 1083, 966, 1, 0, 0, 0, 1083, 969, 1, 0, 0, 0, 1083, 972, 1, 0, 0, 0, 1083, 975, 1, 0, 0, 0, 1083, 978, 1, 0, 0, 0, 1083, 981, 1, 0, 0, 0, 1083, 997, 1, 0, 0, 0, 1083, 1000, 1, 0, 0, 0, 1083, 1003, 1, 0, 0, 0, 1083, 1009, 1, 0, 0, 0, 1083, 
1018, 1, 0, 0, 0, 1083, 1021, 1, 0, 0, 0, 1083, 1031, 1, 0, 0, 0, 1083, 1038, 1, 0, 0, 0, 1084, 1087, 1, 0, 0, 0, 1085, 1083, 1, 0, 0, 0, 1085, 1086, 1, 0, 0, 0, 1086, 65, 1, 0, 0, 0, 1087, 1085, 1, 0, 0, 0, 1088, 1089, 5, 115, 0, 0, 1089, 1094, 5, 3, 0, 0, 1090, 1095, 5, 81, 0, 0, 1091, 1092, 7, 14, 0, 0, 1092, 1093, 5, 5, 0, 0, 1093, 1095, 3, 170, 85, 0, 1094, 1090, 1, 0, 0, 0, 1094, 1091, 1, 0, 0, 0, 1095, 1096, 1, 0, 0, 0, 1096, 1097, 5, 4, 0, 0, 1097, 67, 1, 0, 0, 0, 1098, 1099, 7, 15, 0, 0, 1099, 69, 1, 0, 0, 0, 1100, 1101, 5, 3, 0, 0, 1101, 1106, 3, 64, 32, 0, 1102, 1103, 5, 5, 0, 0, 1103, 1105, 3, 64, 32, 0, 1104, 1102, 1, 0, 0, 0, 1105, 1108, 1, 0, 0, 0, 1106, 1104, 1, 0, 0, 0, 1106, 1107, 1, 0, 0, 0, 1107, 1109, 1, 0, 0, 0, 1108, 1106, 1, 0, 0, 0, 1109, 1110, 5, 4, 0, 0, 1110, 71, 1, 0, 0, 0, 1111, 1112, 5, 144, 0, 0, 1112, 1117, 3, 70, 35, 0, 1113, 1114, 5, 5, 0, 0, 1114, 1116, 3, 70, 35, 0, 1115, 1113, 1, 0, 0, 0, 1116, 1119, 1, 0, 0, 0, 1117, 1115, 1, 0, 0, 0, 1117, 1118, 1, 0, 0, 0, 1118, 73, 1, 0, 0, 0, 1119, 1117, 1, 0, 0, 0, 1120, 1122, 3, 48, 24, 0, 1121, 1120, 1, 0, 0, 0, 1121, 1122, 1, 0, 0, 0, 1122, 1128, 1, 0, 0, 0, 1123, 1129, 5, 88, 0, 0, 1124, 1129, 5, 122, 0, 0, 1125, 1126, 5, 88, 0, 0, 1126, 1127, 5, 108, 0, 0, 1127, 1129, 7, 8, 0, 0, 1128, 1123, 1, 0, 0, 0, 1128, 1124, 1, 0, 0, 0, 1128, 1125, 1, 0, 0, 0, 1129, 1130, 1, 0, 0, 0, 1130, 1134, 5, 91, 0, 0, 1131, 1132, 3, 182, 91, 0, 1132, 1133, 5, 2, 0, 0, 1133, 1135, 1, 0, 0, 0, 1134, 1131, 1, 0, 0, 0, 1134, 1135, 1, 0, 0, 0, 1135, 1136, 1, 0, 0, 0, 1136, 1139, 3, 184, 92, 0, 1137, 1138, 5, 33, 0, 0, 1138, 1140, 3, 206, 103, 0, 1139, 1137, 1, 0, 0, 0, 1139, 1140, 1, 0, 0, 0, 1140, 1152, 1, 0, 0, 0, 1141, 1142, 5, 3, 0, 0, 1142, 1147, 3, 188, 94, 0, 1143, 1144, 5, 5, 0, 0, 1144, 1146, 3, 188, 94, 0, 1145, 1143, 1, 0, 0, 0, 1146, 1149, 1, 0, 0, 0, 1147, 1145, 1, 0, 0, 0, 1147, 1148, 1, 0, 0, 0, 1148, 1150, 1, 0, 0, 0, 1149, 1147, 1, 0, 0, 0, 1150, 1151, 5, 4, 0, 0, 1151, 1153, 1, 0, 0, 0, 1152, 1141, 1, 0, 0, 0, 1152, 1153, 1, 0, 0, 0, 1153, 1163, 1, 0, 0, 0, 1154, 1157, 3, 72, 36, 0, 1155, 1157, 3, 86, 43, 0, 1156, 1154, 1, 0, 0, 0, 1156, 1155, 1, 0, 0, 0, 1157, 1159, 1, 0, 0, 0, 1158, 1160, 3, 78, 39, 0, 1159, 1158, 1, 0, 0, 0, 1159, 1160, 1, 0, 0, 0, 1160, 1164, 1, 0, 0, 0, 1161, 1162, 5, 56, 0, 0, 1162, 1164, 5, 144, 0, 0, 1163, 1156, 1, 0, 0, 0, 1163, 1161, 1, 0, 0, 0, 1164, 1166, 1, 0, 0, 0, 1165, 1167, 3, 76, 38, 0, 1166, 1165, 1, 0, 0, 0, 1166, 1167, 1, 0, 0, 0, 1167, 75, 1, 0, 0, 0, 1168, 1169, 5, 124, 0, 0, 1169, 1174, 3, 100, 50, 0, 1170, 1171, 5, 5, 0, 0, 1171, 1173, 3, 100, 50, 0, 1172, 1170, 1, 0, 0, 0, 1173, 1176, 1, 0, 0, 0, 1174, 1172, 1, 0, 0, 0, 1174, 1175, 1, 0, 0, 0, 1175, 77, 1, 0, 0, 0, 1176, 1174, 1, 0, 0, 0, 1177, 1178, 5, 107, 0, 0, 1178, 1193, 5, 48, 0, 0, 1179, 1180, 5, 3, 0, 0, 1180, 1185, 3, 24, 12, 0, 1181, 1182, 5, 5, 0, 0, 1182, 1184, 3, 24, 12, 0, 1183, 1181, 1, 0, 0, 0, 1184, 1187, 1, 0, 0, 0, 1185, 1183, 1, 0, 0, 0, 1185, 1186, 1, 0, 0, 0, 1186, 1188, 1, 0, 0, 0, 1187, 1185, 1, 0, 0, 0, 1188, 1191, 5, 4, 0, 0, 1189, 1190, 5, 148, 0, 0, 1190, 1192, 3, 64, 32, 0, 1191, 1189, 1, 0, 0, 0, 1191, 1192, 1, 0, 0, 0, 1192, 1194, 1, 0, 0, 0, 1193, 1179, 1, 0, 0, 0, 1193, 1194, 1, 0, 0, 0, 1194, 1195, 1, 0, 0, 0, 1195, 1222, 5, 183, 0, 0, 1196, 1223, 5, 184, 0, 0, 1197, 1198, 5, 141, 0, 0, 1198, 1201, 5, 131, 0, 0, 1199, 1202, 3, 188, 94, 0, 1200, 1202, 3, 110, 55, 0, 1201, 1199, 1, 0, 0, 0, 1201, 1200, 1, 0, 0, 0, 1202, 1203, 1, 0, 0, 0, 1203, 1204, 5, 6, 0, 0, 1204, 1215, 3, 64, 32, 0, 
1205, 1208, 5, 5, 0, 0, 1206, 1209, 3, 188, 94, 0, 1207, 1209, 3, 110, 55, 0, 1208, 1206, 1, 0, 0, 0, 1208, 1207, 1, 0, 0, 0, 1209, 1210, 1, 0, 0, 0, 1210, 1211, 5, 6, 0, 0, 1211, 1212, 3, 64, 32, 0, 1212, 1214, 1, 0, 0, 0, 1213, 1205, 1, 0, 0, 0, 1214, 1217, 1, 0, 0, 0, 1215, 1213, 1, 0, 0, 0, 1215, 1216, 1, 0, 0, 0, 1216, 1220, 1, 0, 0, 0, 1217, 1215, 1, 0, 0, 0, 1218, 1219, 5, 148, 0, 0, 1219, 1221, 3, 64, 32, 0, 1220, 1218, 1, 0, 0, 0, 1220, 1221, 1, 0, 0, 0, 1221, 1223, 1, 0, 0, 0, 1222, 1196, 1, 0, 0, 0, 1222, 1197, 1, 0, 0, 0, 1223, 79, 1, 0, 0, 0, 1224, 1228, 5, 112, 0, 0, 1225, 1226, 3, 182, 91, 0, 1226, 1227, 5, 2, 0, 0, 1227, 1229, 1, 0, 0, 0, 1228, 1225, 1, 0, 0, 0, 1228, 1229, 1, 0, 0, 0, 1229, 1230, 1, 0, 0, 0, 1230, 1237, 3, 202, 101, 0, 1231, 1232, 5, 6, 0, 0, 1232, 1238, 3, 82, 41, 0, 1233, 1234, 5, 3, 0, 0, 1234, 1235, 3, 82, 41, 0, 1235, 1236, 5, 4, 0, 0, 1236, 1238, 1, 0, 0, 0, 1237, 1231, 1, 0, 0, 0, 1237, 1233, 1, 0, 0, 0, 1237, 1238, 1, 0, 0, 0, 1238, 81, 1, 0, 0, 0, 1239, 1243, 3, 34, 17, 0, 1240, 1243, 3, 178, 89, 0, 1241, 1243, 5, 188, 0, 0, 1242, 1239, 1, 0, 0, 0, 1242, 1240, 1, 0, 0, 0, 1242, 1241, 1, 0, 0, 0, 1243, 83, 1, 0, 0, 0, 1244, 1255, 5, 119, 0, 0, 1245, 1256, 3, 190, 95, 0, 1246, 1247, 3, 182, 91, 0, 1247, 1248, 5, 2, 0, 0, 1248, 1250, 1, 0, 0, 0, 1249, 1246, 1, 0, 0, 0, 1249, 1250, 1, 0, 0, 0, 1250, 1253, 1, 0, 0, 0, 1251, 1254, 3, 184, 92, 0, 1252, 1254, 3, 194, 97, 0, 1253, 1251, 1, 0, 0, 0, 1253, 1252, 1, 0, 0, 0, 1254, 1256, 1, 0, 0, 0, 1255, 1245, 1, 0, 0, 0, 1255, 1249, 1, 0, 0, 0, 1255, 1256, 1, 0, 0, 0, 1256, 85, 1, 0, 0, 0, 1257, 1259, 3, 134, 67, 0, 1258, 1257, 1, 0, 0, 0, 1258, 1259, 1, 0, 0, 0, 1259, 1260, 1, 0, 0, 0, 1260, 1266, 3, 90, 45, 0, 1261, 1262, 3, 106, 53, 0, 1262, 1263, 3, 90, 45, 0, 1263, 1265, 1, 0, 0, 0, 1264, 1261, 1, 0, 0, 0, 1265, 1268, 1, 0, 0, 0, 1266, 1264, 1, 0, 0, 0, 1266, 1267, 1, 0, 0, 0, 1267, 1270, 1, 0, 0, 0, 1268, 1266, 1, 0, 0, 0, 1269, 1271, 3, 136, 68, 0, 1270, 1269, 1, 0, 0, 0, 1270, 1271, 1, 0, 0, 0, 1271, 1273, 1, 0, 0, 0, 1272, 1274, 3, 138, 69, 0, 1273, 1272, 1, 0, 0, 0, 1273, 1274, 1, 0, 0, 0, 1274, 87, 1, 0, 0, 0, 1275, 1283, 3, 98, 49, 0, 1276, 1277, 3, 102, 51, 0, 1277, 1279, 3, 98, 49, 0, 1278, 1280, 3, 104, 52, 0, 1279, 1278, 1, 0, 0, 0, 1279, 1280, 1, 0, 0, 0, 1280, 1282, 1, 0, 0, 0, 1281, 1276, 1, 0, 0, 0, 1282, 1285, 1, 0, 0, 0, 1283, 1281, 1, 0, 0, 0, 1283, 1284, 1, 0, 0, 0, 1284, 89, 1, 0, 0, 0, 1285, 1283, 1, 0, 0, 0, 1286, 1288, 5, 130, 0, 0, 1287, 1289, 7, 16, 0, 0, 1288, 1287, 1, 0, 0, 0, 1288, 1289, 1, 0, 0, 0, 1289, 1290, 1, 0, 0, 0, 1290, 1295, 3, 100, 50, 0, 1291, 1292, 5, 5, 0, 0, 1292, 1294, 3, 100, 50, 0, 1293, 1291, 1, 0, 0, 0, 1294, 1297, 1, 0, 0, 0, 1295, 1293, 1, 0, 0, 0, 1295, 1296, 1, 0, 0, 0, 1296, 1310, 1, 0, 0, 0, 1297, 1295, 1, 0, 0, 0, 1298, 1308, 5, 75, 0, 0, 1299, 1304, 3, 98, 49, 0, 1300, 1301, 5, 5, 0, 0, 1301, 1303, 3, 98, 49, 0, 1302, 1300, 1, 0, 0, 0, 1303, 1306, 1, 0, 0, 0, 1304, 1302, 1, 0, 0, 0, 1304, 1305, 1, 0, 0, 0, 1305, 1309, 1, 0, 0, 0, 1306, 1304, 1, 0, 0, 0, 1307, 1309, 3, 88, 44, 0, 1308, 1299, 1, 0, 0, 0, 1308, 1307, 1, 0, 0, 0, 1309, 1311, 1, 0, 0, 0, 1310, 1298, 1, 0, 0, 0, 1310, 1311, 1, 0, 0, 0, 1311, 1314, 1, 0, 0, 0, 1312, 1313, 5, 148, 0, 0, 1313, 1315, 3, 64, 32, 0, 1314, 1312, 1, 0, 0, 0, 1314, 1315, 1, 0, 0, 0, 1315, 1330, 1, 0, 0, 0, 1316, 1317, 5, 78, 0, 0, 1317, 1318, 5, 40, 0, 0, 1318, 1323, 3, 64, 32, 0, 1319, 1320, 5, 5, 0, 0, 1320, 1322, 3, 64, 32, 0, 1321, 1319, 1, 0, 0, 0, 1322, 1325, 1, 0, 0, 0, 1323, 1321, 1, 0, 0, 0, 1323, 1324, 
1, 0, 0, 0, 1324, 1328, 1, 0, 0, 0, 1325, 1323, 1, 0, 0, 0, 1326, 1327, 5, 79, 0, 0, 1327, 1329, 3, 64, 32, 0, 1328, 1326, 1, 0, 0, 0, 1328, 1329, 1, 0, 0, 0, 1329, 1331, 1, 0, 0, 0, 1330, 1316, 1, 0, 0, 0, 1330, 1331, 1, 0, 0, 0, 1331, 1346, 1, 0, 0, 0, 1332, 1333, 5, 174, 0, 0, 1333, 1334, 3, 210, 105, 0, 1334, 1335, 5, 33, 0, 0, 1335, 1343, 3, 120, 60, 0, 1336, 1337, 5, 5, 0, 0, 1337, 1338, 3, 210, 105, 0, 1338, 1339, 5, 33, 0, 0, 1339, 1340, 3, 120, 60, 0, 1340, 1342, 1, 0, 0, 0, 1341, 1336, 1, 0, 0, 0, 1342, 1345, 1, 0, 0, 0, 1343, 1341, 1, 0, 0, 0, 1343, 1344, 1, 0, 0, 0, 1344, 1347, 1, 0, 0, 0, 1345, 1343, 1, 0, 0, 0, 1346, 1332, 1, 0, 0, 0, 1346, 1347, 1, 0, 0, 0, 1347, 1350, 1, 0, 0, 0, 1348, 1350, 3, 72, 36, 0, 1349, 1286, 1, 0, 0, 0, 1349, 1348, 1, 0, 0, 0, 1350, 91, 1, 0, 0, 0, 1351, 1352, 3, 86, 43, 0, 1352, 93, 1, 0, 0, 0, 1353, 1355, 3, 134, 67, 0, 1354, 1353, 1, 0, 0, 0, 1354, 1355, 1, 0, 0, 0, 1355, 1356, 1, 0, 0, 0, 1356, 1358, 3, 90, 45, 0, 1357, 1359, 3, 136, 68, 0, 1358, 1357, 1, 0, 0, 0, 1358, 1359, 1, 0, 0, 0, 1359, 1361, 1, 0, 0, 0, 1360, 1362, 3, 138, 69, 0, 1361, 1360, 1, 0, 0, 0, 1361, 1362, 1, 0, 0, 0, 1362, 95, 1, 0, 0, 0, 1363, 1365, 3, 134, 67, 0, 1364, 1363, 1, 0, 0, 0, 1364, 1365, 1, 0, 0, 0, 1365, 1366, 1, 0, 0, 0, 1366, 1376, 3, 90, 45, 0, 1367, 1369, 5, 139, 0, 0, 1368, 1370, 5, 29, 0, 0, 1369, 1368, 1, 0, 0, 0, 1369, 1370, 1, 0, 0, 0, 1370, 1374, 1, 0, 0, 0, 1371, 1374, 5, 90, 0, 0, 1372, 1374, 5, 68, 0, 0, 1373, 1367, 1, 0, 0, 0, 1373, 1371, 1, 0, 0, 0, 1373, 1372, 1, 0, 0, 0, 1374, 1375, 1, 0, 0, 0, 1375, 1377, 3, 90, 45, 0, 1376, 1373, 1, 0, 0, 0, 1377, 1378, 1, 0, 0, 0, 1378, 1376, 1, 0, 0, 0, 1378, 1379, 1, 0, 0, 0, 1379, 1381, 1, 0, 0, 0, 1380, 1382, 3, 136, 68, 0, 1381, 1380, 1, 0, 0, 0, 1381, 1382, 1, 0, 0, 0, 1382, 1384, 1, 0, 0, 0, 1383, 1385, 3, 138, 69, 0, 1384, 1383, 1, 0, 0, 0, 1384, 1385, 1, 0, 0, 0, 1385, 97, 1, 0, 0, 0, 1386, 1387, 3, 182, 91, 0, 1387, 1388, 5, 2, 0, 0, 1388, 1390, 1, 0, 0, 0, 1389, 1386, 1, 0, 0, 0, 1389, 1390, 1, 0, 0, 0, 1390, 1391, 1, 0, 0, 0, 1391, 1396, 3, 184, 92, 0, 1392, 1394, 5, 33, 0, 0, 1393, 1392, 1, 0, 0, 0, 1393, 1394, 1, 0, 0, 0, 1394, 1395, 1, 0, 0, 0, 1395, 1397, 3, 206, 103, 0, 1396, 1393, 1, 0, 0, 0, 1396, 1397, 1, 0, 0, 0, 1397, 1403, 1, 0, 0, 0, 1398, 1399, 5, 85, 0, 0, 1399, 1400, 5, 40, 0, 0, 1400, 1404, 3, 194, 97, 0, 1401, 1402, 5, 102, 0, 0, 1402, 1404, 5, 85, 0, 0, 1403, 1398, 1, 0, 0, 0, 1403, 1401, 1, 0, 0, 0, 1403, 1404, 1, 0, 0, 0, 1404, 1451, 1, 0, 0, 0, 1405, 1406, 3, 182, 91, 0, 1406, 1407, 5, 2, 0, 0, 1407, 1409, 1, 0, 0, 0, 1408, 1405, 1, 0, 0, 0, 1408, 1409, 1, 0, 0, 0, 1409, 1410, 1, 0, 0, 0, 1410, 1411, 3, 222, 111, 0, 1411, 1412, 5, 3, 0, 0, 1412, 1417, 3, 64, 32, 0, 1413, 1414, 5, 5, 0, 0, 1414, 1416, 3, 64, 32, 0, 1415, 1413, 1, 0, 0, 0, 1416, 1419, 1, 0, 0, 0, 1417, 1415, 1, 0, 0, 0, 1417, 1418, 1, 0, 0, 0, 1418, 1420, 1, 0, 0, 0, 1419, 1417, 1, 0, 0, 0, 1420, 1425, 5, 4, 0, 0, 1421, 1423, 5, 33, 0, 0, 1422, 1421, 1, 0, 0, 0, 1422, 1423, 1, 0, 0, 0, 1423, 1424, 1, 0, 0, 0, 1424, 1426, 3, 206, 103, 0, 1425, 1422, 1, 0, 0, 0, 1425, 1426, 1, 0, 0, 0, 1426, 1451, 1, 0, 0, 0, 1427, 1437, 5, 3, 0, 0, 1428, 1433, 3, 98, 49, 0, 1429, 1430, 5, 5, 0, 0, 1430, 1432, 3, 98, 49, 0, 1431, 1429, 1, 0, 0, 0, 1432, 1435, 1, 0, 0, 0, 1433, 1431, 1, 0, 0, 0, 1433, 1434, 1, 0, 0, 0, 1434, 1438, 1, 0, 0, 0, 1435, 1433, 1, 0, 0, 0, 1436, 1438, 3, 88, 44, 0, 1437, 1428, 1, 0, 0, 0, 1437, 1436, 1, 0, 0, 0, 1438, 1439, 1, 0, 0, 0, 1439, 1440, 5, 4, 0, 0, 1440, 1451, 1, 0, 0, 0, 1441, 1442, 5, 3, 0, 0, 
1442, 1443, 3, 86, 43, 0, 1443, 1448, 5, 4, 0, 0, 1444, 1446, 5, 33, 0, 0, 1445, 1444, 1, 0, 0, 0, 1445, 1446, 1, 0, 0, 0, 1446, 1447, 1, 0, 0, 0, 1447, 1449, 3, 206, 103, 0, 1448, 1445, 1, 0, 0, 0, 1448, 1449, 1, 0, 0, 0, 1449, 1451, 1, 0, 0, 0, 1450, 1389, 1, 0, 0, 0, 1450, 1408, 1, 0, 0, 0, 1450, 1427, 1, 0, 0, 0, 1450, 1441, 1, 0, 0, 0, 1451, 99, 1, 0, 0, 0, 1452, 1465, 5, 7, 0, 0, 1453, 1454, 3, 184, 92, 0, 1454, 1455, 5, 2, 0, 0, 1455, 1456, 5, 7, 0, 0, 1456, 1465, 1, 0, 0, 0, 1457, 1462, 3, 64, 32, 0, 1458, 1460, 5, 33, 0, 0, 1459, 1458, 1, 0, 0, 0, 1459, 1460, 1, 0, 0, 0, 1460, 1461, 1, 0, 0, 0, 1461, 1463, 3, 174, 87, 0, 1462, 1459, 1, 0, 0, 0, 1462, 1463, 1, 0, 0, 0, 1463, 1465, 1, 0, 0, 0, 1464, 1452, 1, 0, 0, 0, 1464, 1453, 1, 0, 0, 0, 1464, 1457, 1, 0, 0, 0, 1465, 101, 1, 0, 0, 0, 1466, 1480, 5, 5, 0, 0, 1467, 1469, 5, 100, 0, 0, 1468, 1467, 1, 0, 0, 0, 1468, 1469, 1, 0, 0, 0, 1469, 1476, 1, 0, 0, 0, 1470, 1472, 5, 96, 0, 0, 1471, 1473, 5, 110, 0, 0, 1472, 1471, 1, 0, 0, 0, 1472, 1473, 1, 0, 0, 0, 1473, 1477, 1, 0, 0, 0, 1474, 1477, 5, 87, 0, 0, 1475, 1477, 5, 51, 0, 0, 1476, 1470, 1, 0, 0, 0, 1476, 1474, 1, 0, 0, 0, 1476, 1475, 1, 0, 0, 0, 1476, 1477, 1, 0, 0, 0, 1477, 1478, 1, 0, 0, 0, 1478, 1480, 5, 94, 0, 0, 1479, 1466, 1, 0, 0, 0, 1479, 1468, 1, 0, 0, 0, 1480, 103, 1, 0, 0, 0, 1481, 1482, 5, 107, 0, 0, 1482, 1496, 3, 64, 32, 0, 1483, 1484, 5, 142, 0, 0, 1484, 1485, 5, 3, 0, 0, 1485, 1490, 3, 188, 94, 0, 1486, 1487, 5, 5, 0, 0, 1487, 1489, 3, 188, 94, 0, 1488, 1486, 1, 0, 0, 0, 1489, 1492, 1, 0, 0, 0, 1490, 1488, 1, 0, 0, 0, 1490, 1491, 1, 0, 0, 0, 1491, 1493, 1, 0, 0, 0, 1492, 1490, 1, 0, 0, 0, 1493, 1494, 5, 4, 0, 0, 1494, 1496, 1, 0, 0, 0, 1495, 1481, 1, 0, 0, 0, 1495, 1483, 1, 0, 0, 0, 1496, 105, 1, 0, 0, 0, 1497, 1499, 5, 139, 0, 0, 1498, 1500, 5, 29, 0, 0, 1499, 1498, 1, 0, 0, 0, 1499, 1500, 1, 0, 0, 0, 1500, 1504, 1, 0, 0, 0, 1501, 1504, 5, 90, 0, 0, 1502, 1504, 5, 68, 0, 0, 1503, 1497, 1, 0, 0, 0, 1503, 1501, 1, 0, 0, 0, 1503, 1502, 1, 0, 0, 0, 1504, 107, 1, 0, 0, 0, 1505, 1507, 3, 48, 24, 0, 1506, 1505, 1, 0, 0, 0, 1506, 1507, 1, 0, 0, 0, 1507, 1508, 1, 0, 0, 0, 1508, 1511, 5, 141, 0, 0, 1509, 1510, 5, 108, 0, 0, 1510, 1512, 7, 8, 0, 0, 1511, 1509, 1, 0, 0, 0, 1511, 1512, 1, 0, 0, 0, 1512, 1513, 1, 0, 0, 0, 1513, 1514, 3, 114, 57, 0, 1514, 1517, 5, 131, 0, 0, 1515, 1518, 3, 188, 94, 0, 1516, 1518, 3, 110, 55, 0, 1517, 1515, 1, 0, 0, 0, 1517, 1516, 1, 0, 0, 0, 1518, 1519, 1, 0, 0, 0, 1519, 1520, 5, 6, 0, 0, 1520, 1531, 3, 64, 32, 0, 1521, 1524, 5, 5, 0, 0, 1522, 1525, 3, 188, 94, 0, 1523, 1525, 3, 110, 55, 0, 1524, 1522, 1, 0, 0, 0, 1524, 1523, 1, 0, 0, 0, 1525, 1526, 1, 0, 0, 0, 1526, 1527, 5, 6, 0, 0, 1527, 1528, 3, 64, 32, 0, 1528, 1530, 1, 0, 0, 0, 1529, 1521, 1, 0, 0, 0, 1530, 1533, 1, 0, 0, 0, 1531, 1529, 1, 0, 0, 0, 1531, 1532, 1, 0, 0, 0, 1532, 1546, 1, 0, 0, 0, 1533, 1531, 1, 0, 0, 0, 1534, 1544, 5, 75, 0, 0, 1535, 1540, 3, 98, 49, 0, 1536, 1537, 5, 5, 0, 0, 1537, 1539, 3, 98, 49, 0, 1538, 1536, 1, 0, 0, 0, 1539, 1542, 1, 0, 0, 0, 1540, 1538, 1, 0, 0, 0, 1540, 1541, 1, 0, 0, 0, 1541, 1545, 1, 0, 0, 0, 1542, 1540, 1, 0, 0, 0, 1543, 1545, 3, 88, 44, 0, 1544, 1535, 1, 0, 0, 0, 1544, 1543, 1, 0, 0, 0, 1545, 1547, 1, 0, 0, 0, 1546, 1534, 1, 0, 0, 0, 1546, 1547, 1, 0, 0, 0, 1547, 1550, 1, 0, 0, 0, 1548, 1549, 5, 148, 0, 0, 1549, 1551, 3, 64, 32, 0, 1550, 1548, 1, 0, 0, 0, 1550, 1551, 1, 0, 0, 0, 1551, 1553, 1, 0, 0, 0, 1552, 1554, 3, 76, 38, 0, 1553, 1552, 1, 0, 0, 0, 1553, 1554, 1, 0, 0, 0, 1554, 109, 1, 0, 0, 0, 1555, 1556, 5, 3, 0, 0, 1556, 1561, 3, 188, 94, 
0, 1557, 1558, 5, 5, 0, 0, 1558, 1560, 3, 188, 94, 0, 1559, 1557, 1, 0, 0, 0, 1560, 1563, 1, 0, 0, 0, 1561, 1559, 1, 0, 0, 0, 1561, 1562, 1, 0, 0, 0, 1562, 1564, 1, 0, 0, 0, 1563, 1561, 1, 0, 0, 0, 1564, 1565, 5, 4, 0, 0, 1565, 111, 1, 0, 0, 0, 1566, 1568, 3, 48, 24, 0, 1567, 1566, 1, 0, 0, 0, 1567, 1568, 1, 0, 0, 0, 1568, 1569, 1, 0, 0, 0, 1569, 1572, 5, 141, 0, 0, 1570, 1571, 5, 108, 0, 0, 1571, 1573, 7, 8, 0, 0, 1572, 1570, 1, 0, 0, 0, 1572, 1573, 1, 0, 0, 0, 1573, 1574, 1, 0, 0, 0, 1574, 1575, 3, 114, 57, 0, 1575, 1578, 5, 131, 0, 0, 1576, 1579, 3, 188, 94, 0, 1577, 1579, 3, 110, 55, 0, 1578, 1576, 1, 0, 0, 0, 1578, 1577, 1, 0, 0, 0, 1579, 1580, 1, 0, 0, 0, 1580, 1581, 5, 6, 0, 0, 1581, 1592, 3, 64, 32, 0, 1582, 1585, 5, 5, 0, 0, 1583, 1586, 3, 188, 94, 0, 1584, 1586, 3, 110, 55, 0, 1585, 1583, 1, 0, 0, 0, 1585, 1584, 1, 0, 0, 0, 1586, 1587, 1, 0, 0, 0, 1587, 1588, 5, 6, 0, 0, 1588, 1589, 3, 64, 32, 0, 1589, 1591, 1, 0, 0, 0, 1590, 1582, 1, 0, 0, 0, 1591, 1594, 1, 0, 0, 0, 1592, 1590, 1, 0, 0, 0, 1592, 1593, 1, 0, 0, 0, 1593, 1597, 1, 0, 0, 0, 1594, 1592, 1, 0, 0, 0, 1595, 1596, 5, 148, 0, 0, 1596, 1598, 3, 64, 32, 0, 1597, 1595, 1, 0, 0, 0, 1597, 1598, 1, 0, 0, 0, 1598, 1600, 1, 0, 0, 0, 1599, 1601, 3, 76, 38, 0, 1600, 1599, 1, 0, 0, 0, 1600, 1601, 1, 0, 0, 0, 1601, 1606, 1, 0, 0, 0, 1602, 1604, 3, 136, 68, 0, 1603, 1602, 1, 0, 0, 0, 1603, 1604, 1, 0, 0, 0, 1604, 1605, 1, 0, 0, 0, 1605, 1607, 3, 138, 69, 0, 1606, 1603, 1, 0, 0, 0, 1606, 1607, 1, 0, 0, 0, 1607, 113, 1, 0, 0, 0, 1608, 1609, 3, 182, 91, 0, 1609, 1610, 5, 2, 0, 0, 1610, 1612, 1, 0, 0, 0, 1611, 1608, 1, 0, 0, 0, 1611, 1612, 1, 0, 0, 0, 1612, 1613, 1, 0, 0, 0, 1613, 1616, 3, 184, 92, 0, 1614, 1615, 5, 33, 0, 0, 1615, 1617, 3, 212, 106, 0, 1616, 1614, 1, 0, 0, 0, 1616, 1617, 1, 0, 0, 0, 1617, 1623, 1, 0, 0, 0, 1618, 1619, 5, 85, 0, 0, 1619, 1620, 5, 40, 0, 0, 1620, 1624, 3, 194, 97, 0, 1621, 1622, 5, 102, 0, 0, 1622, 1624, 5, 85, 0, 0, 1623, 1618, 1, 0, 0, 0, 1623, 1621, 1, 0, 0, 0, 1623, 1624, 1, 0, 0, 0, 1624, 115, 1, 0, 0, 0, 1625, 1627, 5, 143, 0, 0, 1626, 1628, 3, 182, 91, 0, 1627, 1626, 1, 0, 0, 0, 1627, 1628, 1, 0, 0, 0, 1628, 1631, 1, 0, 0, 0, 1629, 1630, 5, 91, 0, 0, 1630, 1632, 3, 214, 107, 0, 1631, 1629, 1, 0, 0, 0, 1631, 1632, 1, 0, 0, 0, 1632, 117, 1, 0, 0, 0, 1633, 1634, 5, 178, 0, 0, 1634, 1635, 5, 3, 0, 0, 1635, 1636, 5, 148, 0, 0, 1636, 1637, 3, 64, 32, 0, 1637, 1638, 5, 4, 0, 0, 1638, 119, 1, 0, 0, 0, 1639, 1641, 5, 3, 0, 0, 1640, 1642, 3, 216, 108, 0, 1641, 1640, 1, 0, 0, 0, 1641, 1642, 1, 0, 0, 0, 1642, 1653, 1, 0, 0, 0, 1643, 1644, 5, 153, 0, 0, 1644, 1645, 5, 40, 0, 0, 1645, 1650, 3, 64, 32, 0, 1646, 1647, 5, 5, 0, 0, 1647, 1649, 3, 64, 32, 0, 1648, 1646, 1, 0, 0, 0, 1649, 1652, 1, 0, 0, 0, 1650, 1648, 1, 0, 0, 0, 1650, 1651, 1, 0, 0, 0, 1651, 1654, 1, 0, 0, 0, 1652, 1650, 1, 0, 0, 0, 1653, 1643, 1, 0, 0, 0, 1653, 1654, 1, 0, 0, 0, 1654, 1655, 1, 0, 0, 0, 1655, 1656, 5, 109, 0, 0, 1656, 1657, 5, 40, 0, 0, 1657, 1662, 3, 140, 70, 0, 1658, 1659, 5, 5, 0, 0, 1659, 1661, 3, 140, 70, 0, 1660, 1658, 1, 0, 0, 0, 1661, 1664, 1, 0, 0, 0, 1662, 1660, 1, 0, 0, 0, 1662, 1663, 1, 0, 0, 0, 1663, 1666, 1, 0, 0, 0, 1664, 1662, 1, 0, 0, 0, 1665, 1667, 3, 124, 62, 0, 1666, 1665, 1, 0, 0, 0, 1666, 1667, 1, 0, 0, 0, 1667, 1668, 1, 0, 0, 0, 1668, 1669, 5, 4, 0, 0, 1669, 121, 1, 0, 0, 0, 1670, 1704, 5, 152, 0, 0, 1671, 1705, 3, 210, 105, 0, 1672, 1674, 5, 3, 0, 0, 1673, 1675, 3, 216, 108, 0, 1674, 1673, 1, 0, 0, 0, 1674, 1675, 1, 0, 0, 0, 1675, 1686, 1, 0, 0, 0, 1676, 1677, 5, 153, 0, 0, 1677, 1678, 5, 40, 0, 0, 1678, 1683, 
3, 64, 32, 0, 1679, 1680, 5, 5, 0, 0, 1680, 1682, 3, 64, 32, 0, 1681, 1679, 1, 0, 0, 0, 1682, 1685, 1, 0, 0, 0, 1683, 1681, 1, 0, 0, 0, 1683, 1684, 1, 0, 0, 0, 1684, 1687, 1, 0, 0, 0, 1685, 1683, 1, 0, 0, 0, 1686, 1676, 1, 0, 0, 0, 1686, 1687, 1, 0, 0, 0, 1687, 1698, 1, 0, 0, 0, 1688, 1689, 5, 109, 0, 0, 1689, 1690, 5, 40, 0, 0, 1690, 1695, 3, 140, 70, 0, 1691, 1692, 5, 5, 0, 0, 1692, 1694, 3, 140, 70, 0, 1693, 1691, 1, 0, 0, 0, 1694, 1697, 1, 0, 0, 0, 1695, 1693, 1, 0, 0, 0, 1695, 1696, 1, 0, 0, 0, 1696, 1699, 1, 0, 0, 0, 1697, 1695, 1, 0, 0, 0, 1698, 1688, 1, 0, 0, 0, 1698, 1699, 1, 0, 0, 0, 1699, 1701, 1, 0, 0, 0, 1700, 1702, 3, 124, 62, 0, 1701, 1700, 1, 0, 0, 0, 1701, 1702, 1, 0, 0, 0, 1702, 1703, 1, 0, 0, 0, 1703, 1705, 5, 4, 0, 0, 1704, 1671, 1, 0, 0, 0, 1704, 1672, 1, 0, 0, 0, 1705, 123, 1, 0, 0, 0, 1706, 1716, 3, 126, 63, 0, 1707, 1714, 5, 180, 0, 0, 1708, 1709, 5, 101, 0, 0, 1709, 1715, 5, 182, 0, 0, 1710, 1711, 5, 157, 0, 0, 1711, 1715, 5, 127, 0, 0, 1712, 1715, 5, 78, 0, 0, 1713, 1715, 5, 181, 0, 0, 1714, 1708, 1, 0, 0, 0, 1714, 1710, 1, 0, 0, 0, 1714, 1712, 1, 0, 0, 0, 1714, 1713, 1, 0, 0, 0, 1715, 1717, 1, 0, 0, 0, 1716, 1707, 1, 0, 0, 0, 1716, 1717, 1, 0, 0, 0, 1717, 125, 1, 0, 0, 0, 1718, 1725, 7, 17, 0, 0, 1719, 1726, 3, 148, 74, 0, 1720, 1721, 5, 39, 0, 0, 1721, 1722, 3, 144, 72, 0, 1722, 1723, 5, 32, 0, 0, 1723, 1724, 3, 146, 73, 0, 1724, 1726, 1, 0, 0, 0, 1725, 1719, 1, 0, 0, 0, 1725, 1720, 1, 0, 0, 0, 1726, 127, 1, 0, 0, 0, 1727, 1728, 3, 218, 109, 0, 1728, 1738, 5, 3, 0, 0, 1729, 1734, 3, 64, 32, 0, 1730, 1731, 5, 5, 0, 0, 1731, 1733, 3, 64, 32, 0, 1732, 1730, 1, 0, 0, 0, 1733, 1736, 1, 0, 0, 0, 1734, 1732, 1, 0, 0, 0, 1734, 1735, 1, 0, 0, 0, 1735, 1739, 1, 0, 0, 0, 1736, 1734, 1, 0, 0, 0, 1737, 1739, 5, 7, 0, 0, 1738, 1729, 1, 0, 0, 0, 1738, 1737, 1, 0, 0, 0, 1739, 1740, 1, 0, 0, 0, 1740, 1741, 5, 4, 0, 0, 1741, 129, 1, 0, 0, 0, 1742, 1743, 3, 220, 110, 0, 1743, 1756, 5, 3, 0, 0, 1744, 1746, 5, 62, 0, 0, 1745, 1744, 1, 0, 0, 0, 1745, 1746, 1, 0, 0, 0, 1746, 1747, 1, 0, 0, 0, 1747, 1752, 3, 64, 32, 0, 1748, 1749, 5, 5, 0, 0, 1749, 1751, 3, 64, 32, 0, 1750, 1748, 1, 0, 0, 0, 1751, 1754, 1, 0, 0, 0, 1752, 1750, 1, 0, 0, 0, 1752, 1753, 1, 0, 0, 0, 1753, 1757, 1, 0, 0, 0, 1754, 1752, 1, 0, 0, 0, 1755, 1757, 5, 7, 0, 0, 1756, 1745, 1, 0, 0, 0, 1756, 1755, 1, 0, 0, 0, 1756, 1757, 1, 0, 0, 0, 1757, 1758, 1, 0, 0, 0, 1758, 1760, 5, 4, 0, 0, 1759, 1761, 3, 118, 59, 0, 1760, 1759, 1, 0, 0, 0, 1760, 1761, 1, 0, 0, 0, 1761, 131, 1, 0, 0, 0, 1762, 1763, 3, 150, 75, 0, 1763, 1773, 5, 3, 0, 0, 1764, 1769, 3, 64, 32, 0, 1765, 1766, 5, 5, 0, 0, 1766, 1768, 3, 64, 32, 0, 1767, 1765, 1, 0, 0, 0, 1768, 1771, 1, 0, 0, 0, 1769, 1767, 1, 0, 0, 0, 1769, 1770, 1, 0, 0, 0, 1770, 1774, 1, 0, 0, 0, 1771, 1769, 1, 0, 0, 0, 1772, 1774, 5, 7, 0, 0, 1773, 1764, 1, 0, 0, 0, 1773, 1772, 1, 0, 0, 0, 1773, 1774, 1, 0, 0, 0, 1774, 1775, 1, 0, 0, 0, 1775, 1777, 5, 4, 0, 0, 1776, 1778, 3, 118, 59, 0, 1777, 1776, 1, 0, 0, 0, 1777, 1778, 1, 0, 0, 0, 1778, 1779, 1, 0, 0, 0, 1779, 1782, 5, 152, 0, 0, 1780, 1783, 3, 120, 60, 0, 1781, 1783, 3, 210, 105, 0, 1782, 1780, 1, 0, 0, 0, 1782, 1781, 1, 0, 0, 0, 1783, 133, 1, 0, 0, 0, 1784, 1786, 5, 149, 0, 0, 1785, 1787, 5, 116, 0, 0, 1786, 1785, 1, 0, 0, 0, 1786, 1787, 1, 0, 0, 0, 1787, 1788, 1, 0, 0, 0, 1788, 1793, 3, 54, 27, 0, 1789, 1790, 5, 5, 0, 0, 1790, 1792, 3, 54, 27, 0, 1791, 1789, 1, 0, 0, 0, 1792, 1795, 1, 0, 0, 0, 1793, 1791, 1, 0, 0, 0, 1793, 1794, 1, 0, 0, 0, 1794, 135, 1, 0, 0, 0, 1795, 1793, 1, 0, 0, 0, 1796, 1797, 5, 109, 0, 0, 1797, 1798, 5, 40, 0, 0, 
1798, 1803, 3, 140, 70, 0, 1799, 1800, 5, 5, 0, 0, 1800, 1802, 3, 140, 70, 0, 1801, 1799, 1, 0, 0, 0, 1802, 1805, 1, 0, 0, 0, 1803, 1801, 1, 0, 0, 0, 1803, 1804, 1, 0, 0, 0, 1804, 137, 1, 0, 0, 0, 1805, 1803, 1, 0, 0, 0, 1806, 1807, 5, 98, 0, 0, 1807, 1810, 3, 64, 32, 0, 1808, 1809, 7, 18, 0, 0, 1809, 1811, 3, 64, 32, 0, 1810, 1808, 1, 0, 0, 0, 1810, 1811, 1, 0, 0, 0, 1811, 139, 1, 0, 0, 0, 1812, 1815, 3, 64, 32, 0, 1813, 1814, 5, 45, 0, 0, 1814, 1816, 3, 190, 95, 0, 1815, 1813, 1, 0, 0, 0, 1815, 1816, 1, 0, 0, 0, 1816, 1818, 1, 0, 0, 0, 1817, 1819, 3, 142, 71, 0, 1818, 1817, 1, 0, 0, 0, 1818, 1819, 1, 0, 0, 0, 1819, 1822, 1, 0, 0, 0, 1820, 1821, 5, 175, 0, 0, 1821, 1823, 7, 19, 0, 0, 1822, 1820, 1, 0, 0, 0, 1822, 1823, 1, 0, 0, 0, 1823, 141, 1, 0, 0, 0, 1824, 1825, 7, 20, 0, 0, 1825, 143, 1, 0, 0, 0, 1826, 1827, 3, 64, 32, 0, 1827, 1828, 5, 155, 0, 0, 1828, 1837, 1, 0, 0, 0, 1829, 1830, 3, 64, 32, 0, 1830, 1831, 5, 158, 0, 0, 1831, 1837, 1, 0, 0, 0, 1832, 1833, 5, 157, 0, 0, 1833, 1837, 5, 127, 0, 0, 1834, 1835, 5, 156, 0, 0, 1835, 1837, 5, 155, 0, 0, 1836, 1826, 1, 0, 0, 0, 1836, 1829, 1, 0, 0, 0, 1836, 1832, 1, 0, 0, 0, 1836, 1834, 1, 0, 0, 0, 1837, 145, 1, 0, 0, 0, 1838, 1839, 3, 64, 32, 0, 1839, 1840, 5, 155, 0, 0, 1840, 1849, 1, 0, 0, 0, 1841, 1842, 3, 64, 32, 0, 1842, 1843, 5, 158, 0, 0, 1843, 1849, 1, 0, 0, 0, 1844, 1845, 5, 157, 0, 0, 1845, 1849, 5, 127, 0, 0, 1846, 1847, 5, 156, 0, 0, 1847, 1849, 5, 158, 0, 0, 1848, 1838, 1, 0, 0, 0, 1848, 1841, 1, 0, 0, 0, 1848, 1844, 1, 0, 0, 0, 1848, 1846, 1, 0, 0, 0, 1849, 147, 1, 0, 0, 0, 1850, 1851, 3, 64, 32, 0, 1851, 1852, 5, 155, 0, 0, 1852, 1858, 1, 0, 0, 0, 1853, 1854, 5, 156, 0, 0, 1854, 1858, 5, 155, 0, 0, 1855, 1856, 5, 157, 0, 0, 1856, 1858, 5, 127, 0, 0, 1857, 1850, 1, 0, 0, 0, 1857, 1853, 1, 0, 0, 0, 1857, 1855, 1, 0, 0, 0, 1858, 149, 1, 0, 0, 0, 1859, 1860, 7, 21, 0, 0, 1860, 1861, 5, 3, 0, 0, 1861, 1862, 3, 64, 32, 0, 1862, 1863, 5, 4, 0, 0, 1863, 1864, 5, 152, 0, 0, 1864, 1866, 5, 3, 0, 0, 1865, 1867, 3, 156, 78, 0, 1866, 1865, 1, 0, 0, 0, 1866, 1867, 1, 0, 0, 0, 1867, 1868, 1, 0, 0, 0, 1868, 1870, 3, 160, 80, 0, 1869, 1871, 3, 126, 63, 0, 1870, 1869, 1, 0, 0, 0, 1870, 1871, 1, 0, 0, 0, 1871, 1872, 1, 0, 0, 0, 1872, 1873, 5, 4, 0, 0, 1873, 1945, 1, 0, 0, 0, 1874, 1875, 7, 22, 0, 0, 1875, 1876, 5, 3, 0, 0, 1876, 1877, 5, 4, 0, 0, 1877, 1878, 5, 152, 0, 0, 1878, 1880, 5, 3, 0, 0, 1879, 1881, 3, 156, 78, 0, 1880, 1879, 1, 0, 0, 0, 1880, 1881, 1, 0, 0, 0, 1881, 1883, 1, 0, 0, 0, 1882, 1884, 3, 158, 79, 0, 1883, 1882, 1, 0, 0, 0, 1883, 1884, 1, 0, 0, 0, 1884, 1885, 1, 0, 0, 0, 1885, 1945, 5, 4, 0, 0, 1886, 1887, 7, 23, 0, 0, 1887, 1888, 5, 3, 0, 0, 1888, 1889, 5, 4, 0, 0, 1889, 1890, 5, 152, 0, 0, 1890, 1892, 5, 3, 0, 0, 1891, 1893, 3, 156, 78, 0, 1892, 1891, 1, 0, 0, 0, 1892, 1893, 1, 0, 0, 0, 1893, 1894, 1, 0, 0, 0, 1894, 1895, 3, 160, 80, 0, 1895, 1896, 5, 4, 0, 0, 1896, 1945, 1, 0, 0, 0, 1897, 1898, 7, 24, 0, 0, 1898, 1899, 5, 3, 0, 0, 1899, 1901, 3, 64, 32, 0, 1900, 1902, 3, 152, 76, 0, 1901, 1900, 1, 0, 0, 0, 1901, 1902, 1, 0, 0, 0, 1902, 1904, 1, 0, 0, 0, 1903, 1905, 3, 154, 77, 0, 1904, 1903, 1, 0, 0, 0, 1904, 1905, 1, 0, 0, 0, 1905, 1906, 1, 0, 0, 0, 1906, 1907, 5, 4, 0, 0, 1907, 1908, 5, 152, 0, 0, 1908, 1910, 5, 3, 0, 0, 1909, 1911, 3, 156, 78, 0, 1910, 1909, 1, 0, 0, 0, 1910, 1911, 1, 0, 0, 0, 1911, 1912, 1, 0, 0, 0, 1912, 1913, 3, 160, 80, 0, 1913, 1914, 5, 4, 0, 0, 1914, 1945, 1, 0, 0, 0, 1915, 1916, 5, 164, 0, 0, 1916, 1917, 5, 3, 0, 0, 1917, 1918, 3, 64, 32, 0, 1918, 1919, 5, 5, 0, 0, 1919, 1920, 3, 34, 17, 0, 1920, 
1921, 5, 4, 0, 0, 1921, 1922, 5, 152, 0, 0, 1922, 1924, 5, 3, 0, 0, 1923, 1925, 3, 156, 78, 0, 1924, 1923, 1, 0, 0, 0, 1924, 1925, 1, 0, 0, 0, 1925, 1926, 1, 0, 0, 0, 1926, 1928, 3, 160, 80, 0, 1927, 1929, 3, 126, 63, 0, 1928, 1927, 1, 0, 0, 0, 1928, 1929, 1, 0, 0, 0, 1929, 1930, 1, 0, 0, 0, 1930, 1931, 5, 4, 0, 0, 1931, 1945, 1, 0, 0, 0, 1932, 1933, 5, 165, 0, 0, 1933, 1934, 5, 3, 0, 0, 1934, 1935, 3, 64, 32, 0, 1935, 1936, 5, 4, 0, 0, 1936, 1937, 5, 152, 0, 0, 1937, 1939, 5, 3, 0, 0, 1938, 1940, 3, 156, 78, 0, 1939, 1938, 1, 0, 0, 0, 1939, 1940, 1, 0, 0, 0, 1940, 1941, 1, 0, 0, 0, 1941, 1942, 3, 160, 80, 0, 1942, 1943, 5, 4, 0, 0, 1943, 1945, 1, 0, 0, 0, 1944, 1859, 1, 0, 0, 0, 1944, 1874, 1, 0, 0, 0, 1944, 1886, 1, 0, 0, 0, 1944, 1897, 1, 0, 0, 0, 1944, 1915, 1, 0, 0, 0, 1944, 1932, 1, 0, 0, 0, 1945, 151, 1, 0, 0, 0, 1946, 1947, 5, 5, 0, 0, 1947, 1948, 3, 34, 17, 0, 1948, 153, 1, 0, 0, 0, 1949, 1950, 5, 5, 0, 0, 1950, 1951, 3, 34, 17, 0, 1951, 155, 1, 0, 0, 0, 1952, 1953, 5, 153, 0, 0, 1953, 1955, 5, 40, 0, 0, 1954, 1956, 3, 64, 32, 0, 1955, 1954, 1, 0, 0, 0, 1956, 1957, 1, 0, 0, 0, 1957, 1955, 1, 0, 0, 0, 1957, 1958, 1, 0, 0, 0, 1958, 157, 1, 0, 0, 0, 1959, 1960, 5, 109, 0, 0, 1960, 1962, 5, 40, 0, 0, 1961, 1963, 3, 64, 32, 0, 1962, 1961, 1, 0, 0, 0, 1963, 1964, 1, 0, 0, 0, 1964, 1962, 1, 0, 0, 0, 1964, 1965, 1, 0, 0, 0, 1965, 159, 1, 0, 0, 0, 1966, 1967, 5, 109, 0, 0, 1967, 1968, 5, 40, 0, 0, 1968, 1969, 3, 162, 81, 0, 1969, 161, 1, 0, 0, 0, 1970, 1972, 3, 64, 32, 0, 1971, 1973, 3, 142, 71, 0, 1972, 1971, 1, 0, 0, 0, 1972, 1973, 1, 0, 0, 0, 1973, 1981, 1, 0, 0, 0, 1974, 1975, 5, 5, 0, 0, 1975, 1977, 3, 64, 32, 0, 1976, 1978, 3, 142, 71, 0, 1977, 1976, 1, 0, 0, 0, 1977, 1978, 1, 0, 0, 0, 1978, 1980, 1, 0, 0, 0, 1979, 1974, 1, 0, 0, 0, 1980, 1983, 1, 0, 0, 0, 1981, 1979, 1, 0, 0, 0, 1981, 1982, 1, 0, 0, 0, 1982, 163, 1, 0, 0, 0, 1983, 1981, 1, 0, 0, 0, 1984, 1985, 3, 86, 43, 0, 1985, 165, 1, 0, 0, 0, 1986, 1987, 3, 86, 43, 0, 1987, 167, 1, 0, 0, 0, 1988, 1989, 7, 25, 0, 0, 1989, 169, 1, 0, 0, 0, 1990, 1991, 5, 188, 0, 0, 1991, 171, 1, 0, 0, 0, 1992, 1995, 3, 64, 32, 0, 1993, 1995, 3, 28, 14, 0, 1994, 1992, 1, 0, 0, 0, 1994, 1993, 1, 0, 0, 0, 1995, 173, 1, 0, 0, 0, 1996, 1997, 7, 26, 0, 0, 1997, 175, 1, 0, 0, 0, 1998, 1999, 7, 27, 0, 0, 1999, 177, 1, 0, 0, 0, 2000, 2001, 3, 224, 112, 0, 2001, 179, 1, 0, 0, 0, 2002, 2003, 3, 224, 112, 0, 2003, 181, 1, 0, 0, 0, 2004, 2005, 3, 224, 112, 0, 2005, 183, 1, 0, 0, 0, 2006, 2007, 3, 224, 112, 0, 2007, 185, 1, 0, 0, 0, 2008, 2009, 3, 224, 112, 0, 2009, 187, 1, 0, 0, 0, 2010, 2011, 3, 224, 112, 0, 2011, 189, 1, 0, 0, 0, 2012, 2013, 3, 224, 112, 0, 2013, 191, 1, 0, 0, 0, 2014, 2015, 3, 224, 112, 0, 2015, 193, 1, 0, 0, 0, 2016, 2017, 3, 224, 112, 0, 2017, 195, 1, 0, 0, 0, 2018, 2019, 3, 224, 112, 0, 2019, 197, 1, 0, 0, 0, 2020, 2021, 3, 224, 112, 0, 2021, 199, 1, 0, 0, 0, 2022, 2023, 3, 224, 112, 0, 2023, 201, 1, 0, 0, 0, 2024, 2025, 3, 224, 112, 0, 2025, 203, 1, 0, 0, 0, 2026, 2027, 3, 224, 112, 0, 2027, 205, 1, 0, 0, 0, 2028, 2029, 3, 224, 112, 0, 2029, 207, 1, 0, 0, 0, 2030, 2031, 3, 224, 112, 0, 2031, 209, 1, 0, 0, 0, 2032, 2033, 3, 224, 112, 0, 2033, 211, 1, 0, 0, 0, 2034, 2035, 3, 224, 112, 0, 2035, 213, 1, 0, 0, 0, 2036, 2037, 3, 224, 112, 0, 2037, 215, 1, 0, 0, 0, 2038, 2039, 3, 224, 112, 0, 2039, 217, 1, 0, 0, 0, 2040, 2041, 3, 224, 112, 0, 2041, 219, 1, 0, 0, 0, 2042, 2043, 3, 224, 112, 0, 2043, 221, 1, 0, 0, 0, 2044, 2045, 3, 224, 112, 0, 2045, 223, 1, 0, 0, 0, 2046, 2054, 5, 185, 0, 0, 2047, 2054, 3, 176, 88, 0, 2048, 2054, 5, 188, 0, 0, 
2049, 2050, 5, 3, 0, 0, 2050, 2051, 3, 224, 112, 0, 2051, 2052, 5, 4, 0, 0, 2052, 2054, 1, 0, 0, 0, 2053, 2046, 1, 0, 0, 0, 2053, 2047, 1, 0, 0, 0, 2053, 2048, 1, 0, 0, 0, 2053, 2049, 1, 0, 0, 0, 2054, 225, 1, 0, 0, 0, 296, 229, 237, 244, 249, 255, 261, 263, 289, 296, 303, 309, 313, 318, 321, 328, 331, 335, 343, 347, 349, 353, 357, 361, 364, 371, 377, 383, 388, 399, 405, 409, 413, 416, 420, 426, 431, 440, 447, 453, 457, 461, 466, 472, 484, 488, 493, 496, 499, 502, 506, 509, 523, 530, 537, 539, 542, 548, 553, 561, 566, 581, 587, 597, 602, 612, 616, 618, 622, 627, 629, 637, 643, 648, 655, 666, 669, 671, 678, 682, 689, 695, 701, 707, 712, 721, 726, 737, 742, 753, 758, 762, 778, 788, 793, 801, 813, 818, 826, 833, 836, 839, 846, 849, 852, 855, 859, 867, 872, 882, 887, 896, 903, 907, 911, 914, 922, 935, 938, 946, 955, 959, 964, 994, 1006, 1011, 1023, 1029, 1036, 1040, 1050, 1053, 1059, 1065, 1074, 1077, 1081, 1083, 1085, 1094, 1106, 1117, 1121, 1128, 1134, 1139, 1147, 1152, 1156, 1159, 1163, 1166, 1174, 1185, 1191, 1193, 1201, 1208, 1215, 1220, 1222, 1228, 1237, 1242, 1249, 1253, 1255, 1258, 1266, 1270, 1273, 1279, 1283, 1288, 1295, 1304, 1308, 1310, 1314, 1323, 1328, 1330, 1343, 1346, 1349, 1354, 1358, 1361, 1364, 1369, 1373, 1378, 1381, 1384, 1389, 1393, 1396, 1403, 1408, 1417, 1422, 1425, 1433, 1437, 1445, 1448, 1450, 1459, 1462, 1464, 1468, 1472, 1476, 1479, 1490, 1495, 1499, 1503, 1506, 1511, 1517, 1524, 1531, 1540, 1544, 1546, 1550, 1553, 1561, 1567, 1572, 1578, 1585, 1592, 1597, 1600, 1603, 1606, 1611, 1616, 1623, 1627, 1631, 1641, 1650, 1653, 1662, 1666, 1674, 1683, 1686, 1695, 1698, 1701, 1704, 1714, 1716, 1725, 1734, 1738, 1745, 1752, 1756, 1760, 1769, 1773, 1777, 1782, 1786, 1793, 1803, 1810, 1815, 1818, 1822, 1836, 1848, 1857, 1866, 1870, 1880, 1883, 1892, 1901, 1904, 1910, 1924, 1928, 1939, 1944, 1957, 1964, 1972, 1977, 1981, 1994, 2053] \ No newline at end of file diff --git a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteParser.tokens b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteParser.tokens index 26a91841ac..4418b51e12 100644 --- a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteParser.tokens +++ b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/SQLiteParser.tokens @@ -1,377 +1,377 @@ -SCOL=1 -DOT=2 -OPEN_PAR=3 -CLOSE_PAR=4 -COMMA=5 -ASSIGN=6 -STAR=7 -PLUS=8 -MINUS=9 -TILDE=10 -PIPE2=11 -DIV=12 -MOD=13 -LT2=14 -GT2=15 -AMP=16 -PIPE=17 -LT=18 -LT_EQ=19 -GT=20 -GT_EQ=21 -EQ=22 -NOT_EQ1=23 -NOT_EQ2=24 -ABORT_=25 -ACTION_=26 -ADD_=27 -AFTER_=28 -ALL_=29 -ALTER_=30 -ANALYZE_=31 -AND_=32 -AS_=33 -ASC_=34 -ATTACH_=35 -AUTOINCREMENT_=36 -BEFORE_=37 -BEGIN_=38 -BETWEEN_=39 -BY_=40 -CASCADE_=41 -CASE_=42 -CAST_=43 -CHECK_=44 -COLLATE_=45 -COLUMN_=46 -COMMIT_=47 -CONFLICT_=48 -CONSTRAINT_=49 -CREATE_=50 -CROSS_=51 -CURRENT_DATE_=52 -CURRENT_TIME_=53 -CURRENT_TIMESTAMP_=54 -DATABASE_=55 -DEFAULT_=56 -DEFERRABLE_=57 -DEFERRED_=58 -DELETE_=59 -DESC_=60 -DETACH_=61 -DISTINCT_=62 -DROP_=63 -EACH_=64 -ELSE_=65 -END_=66 -ESCAPE_=67 -EXCEPT_=68 -EXCLUSIVE_=69 -EXISTS_=70 -EXPLAIN_=71 -FAIL_=72 -FOR_=73 -FOREIGN_=74 -FROM_=75 -FULL_=76 -GLOB_=77 -GROUP_=78 -HAVING_=79 -IF_=80 -IGNORE_=81 -IMMEDIATE_=82 -IN_=83 -INDEX_=84 -INDEXED_=85 -INITIALLY_=86 -INNER_=87 -INSERT_=88 -INSTEAD_=89 -INTERSECT_=90 -INTO_=91 -IS_=92 -ISNULL_=93 -JOIN_=94 -KEY_=95 -LEFT_=96 -LIKE_=97 -LIMIT_=98 -MATCH_=99 -NATURAL_=100 -NO_=101 -NOT_=102 -NOTNULL_=103 -NULL_=104 -OF_=105 -OFFSET_=106 -ON_=107 -OR_=108 -ORDER_=109 -OUTER_=110 -PLAN_=111 -PRAGMA_=112 
-PRIMARY_=113 -QUERY_=114 -RAISE_=115 -RECURSIVE_=116 -REFERENCES_=117 -REGEXP_=118 -REINDEX_=119 -RELEASE_=120 -RENAME_=121 -REPLACE_=122 -RESTRICT_=123 -RETURNING_=124 -RIGHT_=125 -ROLLBACK_=126 -ROW_=127 -ROWS_=128 -SAVEPOINT_=129 -SELECT_=130 -SET_=131 -TABLE_=132 -TEMP_=133 -TEMPORARY_=134 -THEN_=135 -TO_=136 -TRANSACTION_=137 -TRIGGER_=138 -UNION_=139 -UNIQUE_=140 -UPDATE_=141 -USING_=142 -VACUUM_=143 -VALUES_=144 -VIEW_=145 -VIRTUAL_=146 -WHEN_=147 -WHERE_=148 -WITH_=149 -WITHOUT_=150 -FIRST_VALUE_=151 -OVER_=152 -PARTITION_=153 -RANGE_=154 -PRECEDING_=155 -UNBOUNDED_=156 -CURRENT_=157 -FOLLOWING_=158 -CUME_DIST_=159 -DENSE_RANK_=160 -LAG_=161 -LAST_VALUE_=162 -LEAD_=163 -NTH_VALUE_=164 -NTILE_=165 -PERCENT_RANK_=166 -RANK_=167 -ROW_NUMBER_=168 -GENERATED_=169 -ALWAYS_=170 -STORED_=171 -TRUE_=172 -FALSE_=173 -WINDOW_=174 -NULLS_=175 -FIRST_=176 -LAST_=177 -FILTER_=178 -GROUPS_=179 -EXCLUDE_=180 -TIES_=181 -OTHERS_=182 -DO_=183 -NOTHING_=184 -IDENTIFIER=185 -NUMERIC_LITERAL=186 -BIND_PARAMETER=187 -STRING_LITERAL=188 -BLOB_LITERAL=189 -SINGLE_LINE_COMMENT=190 -MULTILINE_COMMENT=191 -SPACES=192 -UNEXPECTED_CHAR=193 -';'=1 -'.'=2 -'('=3 -')'=4 -','=5 -'='=6 -'*'=7 -'+'=8 -'-'=9 -'~'=10 -'||'=11 -'/'=12 -'%'=13 -'<<'=14 -'>>'=15 -'&'=16 -'|'=17 -'<'=18 -'<='=19 -'>'=20 -'>='=21 -'=='=22 -'!='=23 -'<>'=24 -'ABORT'=25 -'ACTION'=26 -'ADD'=27 -'AFTER'=28 -'ALL'=29 -'ALTER'=30 -'ANALYZE'=31 -'AND'=32 -'AS'=33 -'ASC'=34 -'ATTACH'=35 -'AUTOINCREMENT'=36 -'BEFORE'=37 -'BEGIN'=38 -'BETWEEN'=39 -'BY'=40 -'CASCADE'=41 -'CASE'=42 -'CAST'=43 -'CHECK'=44 -'COLLATE'=45 -'COLUMN'=46 -'COMMIT'=47 -'CONFLICT'=48 -'CONSTRAINT'=49 -'CREATE'=50 -'CROSS'=51 -'CURRENT_DATE'=52 -'CURRENT_TIME'=53 -'CURRENT_TIMESTAMP'=54 -'DATABASE'=55 -'DEFAULT'=56 -'DEFERRABLE'=57 -'DEFERRED'=58 -'DELETE'=59 -'DESC'=60 -'DETACH'=61 -'DISTINCT'=62 -'DROP'=63 -'EACH'=64 -'ELSE'=65 -'END'=66 -'ESCAPE'=67 -'EXCEPT'=68 -'EXCLUSIVE'=69 -'EXISTS'=70 -'EXPLAIN'=71 -'FAIL'=72 -'FOR'=73 -'FOREIGN'=74 -'FROM'=75 -'FULL'=76 -'GLOB'=77 -'GROUP'=78 -'HAVING'=79 -'IF'=80 -'IGNORE'=81 -'IMMEDIATE'=82 -'IN'=83 -'INDEX'=84 -'INDEXED'=85 -'INITIALLY'=86 -'INNER'=87 -'INSERT'=88 -'INSTEAD'=89 -'INTERSECT'=90 -'INTO'=91 -'IS'=92 -'ISNULL'=93 -'JOIN'=94 -'KEY'=95 -'LEFT'=96 -'LIKE'=97 -'LIMIT'=98 -'MATCH'=99 -'NATURAL'=100 -'NO'=101 -'NOT'=102 -'NOTNULL'=103 -'NULL'=104 -'OF'=105 -'OFFSET'=106 -'ON'=107 -'OR'=108 -'ORDER'=109 -'OUTER'=110 -'PLAN'=111 -'PRAGMA'=112 -'PRIMARY'=113 -'QUERY'=114 -'RAISE'=115 -'RECURSIVE'=116 -'REFERENCES'=117 -'REGEXP'=118 -'REINDEX'=119 -'RELEASE'=120 -'RENAME'=121 -'REPLACE'=122 -'RESTRICT'=123 -'RETURNING'=124 -'RIGHT'=125 -'ROLLBACK'=126 -'ROW'=127 -'ROWS'=128 -'SAVEPOINT'=129 -'SELECT'=130 -'SET'=131 -'TABLE'=132 -'TEMP'=133 -'TEMPORARY'=134 -'THEN'=135 -'TO'=136 -'TRANSACTION'=137 -'TRIGGER'=138 -'UNION'=139 -'UNIQUE'=140 -'UPDATE'=141 -'USING'=142 -'VACUUM'=143 -'VALUES'=144 -'VIEW'=145 -'VIRTUAL'=146 -'WHEN'=147 -'WHERE'=148 -'WITH'=149 -'WITHOUT'=150 -'FIRST_VALUE'=151 -'OVER'=152 -'PARTITION'=153 -'RANGE'=154 -'PRECEDING'=155 -'UNBOUNDED'=156 -'CURRENT'=157 -'FOLLOWING'=158 -'CUME_DIST'=159 -'DENSE_RANK'=160 -'LAG'=161 -'LAST_VALUE'=162 -'LEAD'=163 -'NTH_VALUE'=164 -'NTILE'=165 -'PERCENT_RANK'=166 -'RANK'=167 -'ROW_NUMBER'=168 -'GENERATED'=169 -'ALWAYS'=170 -'STORED'=171 -'TRUE'=172 -'FALSE'=173 -'WINDOW'=174 -'NULLS'=175 -'FIRST'=176 -'LAST'=177 -'FILTER'=178 -'GROUPS'=179 -'EXCLUDE'=180 -'TIES'=181 -'OTHERS'=182 -'DO'=183 -'NOTHING'=184 +SCOL=1 +DOT=2 +OPEN_PAR=3 +CLOSE_PAR=4 +COMMA=5 +ASSIGN=6 +STAR=7 
+PLUS=8 +MINUS=9 +TILDE=10 +PIPE2=11 +DIV=12 +MOD=13 +LT2=14 +GT2=15 +AMP=16 +PIPE=17 +LT=18 +LT_EQ=19 +GT=20 +GT_EQ=21 +EQ=22 +NOT_EQ1=23 +NOT_EQ2=24 +ABORT_=25 +ACTION_=26 +ADD_=27 +AFTER_=28 +ALL_=29 +ALTER_=30 +ANALYZE_=31 +AND_=32 +AS_=33 +ASC_=34 +ATTACH_=35 +AUTOINCREMENT_=36 +BEFORE_=37 +BEGIN_=38 +BETWEEN_=39 +BY_=40 +CASCADE_=41 +CASE_=42 +CAST_=43 +CHECK_=44 +COLLATE_=45 +COLUMN_=46 +COMMIT_=47 +CONFLICT_=48 +CONSTRAINT_=49 +CREATE_=50 +CROSS_=51 +CURRENT_DATE_=52 +CURRENT_TIME_=53 +CURRENT_TIMESTAMP_=54 +DATABASE_=55 +DEFAULT_=56 +DEFERRABLE_=57 +DEFERRED_=58 +DELETE_=59 +DESC_=60 +DETACH_=61 +DISTINCT_=62 +DROP_=63 +EACH_=64 +ELSE_=65 +END_=66 +ESCAPE_=67 +EXCEPT_=68 +EXCLUSIVE_=69 +EXISTS_=70 +EXPLAIN_=71 +FAIL_=72 +FOR_=73 +FOREIGN_=74 +FROM_=75 +FULL_=76 +GLOB_=77 +GROUP_=78 +HAVING_=79 +IF_=80 +IGNORE_=81 +IMMEDIATE_=82 +IN_=83 +INDEX_=84 +INDEXED_=85 +INITIALLY_=86 +INNER_=87 +INSERT_=88 +INSTEAD_=89 +INTERSECT_=90 +INTO_=91 +IS_=92 +ISNULL_=93 +JOIN_=94 +KEY_=95 +LEFT_=96 +LIKE_=97 +LIMIT_=98 +MATCH_=99 +NATURAL_=100 +NO_=101 +NOT_=102 +NOTNULL_=103 +NULL_=104 +OF_=105 +OFFSET_=106 +ON_=107 +OR_=108 +ORDER_=109 +OUTER_=110 +PLAN_=111 +PRAGMA_=112 +PRIMARY_=113 +QUERY_=114 +RAISE_=115 +RECURSIVE_=116 +REFERENCES_=117 +REGEXP_=118 +REINDEX_=119 +RELEASE_=120 +RENAME_=121 +REPLACE_=122 +RESTRICT_=123 +RETURNING_=124 +RIGHT_=125 +ROLLBACK_=126 +ROW_=127 +ROWS_=128 +SAVEPOINT_=129 +SELECT_=130 +SET_=131 +TABLE_=132 +TEMP_=133 +TEMPORARY_=134 +THEN_=135 +TO_=136 +TRANSACTION_=137 +TRIGGER_=138 +UNION_=139 +UNIQUE_=140 +UPDATE_=141 +USING_=142 +VACUUM_=143 +VALUES_=144 +VIEW_=145 +VIRTUAL_=146 +WHEN_=147 +WHERE_=148 +WITH_=149 +WITHOUT_=150 +FIRST_VALUE_=151 +OVER_=152 +PARTITION_=153 +RANGE_=154 +PRECEDING_=155 +UNBOUNDED_=156 +CURRENT_=157 +FOLLOWING_=158 +CUME_DIST_=159 +DENSE_RANK_=160 +LAG_=161 +LAST_VALUE_=162 +LEAD_=163 +NTH_VALUE_=164 +NTILE_=165 +PERCENT_RANK_=166 +RANK_=167 +ROW_NUMBER_=168 +GENERATED_=169 +ALWAYS_=170 +STORED_=171 +TRUE_=172 +FALSE_=173 +WINDOW_=174 +NULLS_=175 +FIRST_=176 +LAST_=177 +FILTER_=178 +GROUPS_=179 +EXCLUDE_=180 +TIES_=181 +OTHERS_=182 +DO_=183 +NOTHING_=184 +IDENTIFIER=185 +NUMERIC_LITERAL=186 +BIND_PARAMETER=187 +STRING_LITERAL=188 +BLOB_LITERAL=189 +SINGLE_LINE_COMMENT=190 +MULTILINE_COMMENT=191 +SPACES=192 +UNEXPECTED_CHAR=193 +';'=1 +'.'=2 +'('=3 +')'=4 +','=5 +'='=6 +'*'=7 +'+'=8 +'-'=9 +'~'=10 +'||'=11 +'/'=12 +'%'=13 +'<<'=14 +'>>'=15 +'&'=16 +'|'=17 +'<'=18 +'<='=19 +'>'=20 +'>='=21 +'=='=22 +'!='=23 +'<>'=24 +'ABORT'=25 +'ACTION'=26 +'ADD'=27 +'AFTER'=28 +'ALL'=29 +'ALTER'=30 +'ANALYZE'=31 +'AND'=32 +'AS'=33 +'ASC'=34 +'ATTACH'=35 +'AUTOINCREMENT'=36 +'BEFORE'=37 +'BEGIN'=38 +'BETWEEN'=39 +'BY'=40 +'CASCADE'=41 +'CASE'=42 +'CAST'=43 +'CHECK'=44 +'COLLATE'=45 +'COLUMN'=46 +'COMMIT'=47 +'CONFLICT'=48 +'CONSTRAINT'=49 +'CREATE'=50 +'CROSS'=51 +'CURRENT_DATE'=52 +'CURRENT_TIME'=53 +'CURRENT_TIMESTAMP'=54 +'DATABASE'=55 +'DEFAULT'=56 +'DEFERRABLE'=57 +'DEFERRED'=58 +'DELETE'=59 +'DESC'=60 +'DETACH'=61 +'DISTINCT'=62 +'DROP'=63 +'EACH'=64 +'ELSE'=65 +'END'=66 +'ESCAPE'=67 +'EXCEPT'=68 +'EXCLUSIVE'=69 +'EXISTS'=70 +'EXPLAIN'=71 +'FAIL'=72 +'FOR'=73 +'FOREIGN'=74 +'FROM'=75 +'FULL'=76 +'GLOB'=77 +'GROUP'=78 +'HAVING'=79 +'IF'=80 +'IGNORE'=81 +'IMMEDIATE'=82 +'IN'=83 +'INDEX'=84 +'INDEXED'=85 +'INITIALLY'=86 +'INNER'=87 +'INSERT'=88 +'INSTEAD'=89 +'INTERSECT'=90 +'INTO'=91 +'IS'=92 +'ISNULL'=93 +'JOIN'=94 +'KEY'=95 +'LEFT'=96 +'LIKE'=97 +'LIMIT'=98 +'MATCH'=99 +'NATURAL'=100 +'NO'=101 +'NOT'=102 +'NOTNULL'=103 +'NULL'=104 +'OF'=105 
+'OFFSET'=106 +'ON'=107 +'OR'=108 +'ORDER'=109 +'OUTER'=110 +'PLAN'=111 +'PRAGMA'=112 +'PRIMARY'=113 +'QUERY'=114 +'RAISE'=115 +'RECURSIVE'=116 +'REFERENCES'=117 +'REGEXP'=118 +'REINDEX'=119 +'RELEASE'=120 +'RENAME'=121 +'REPLACE'=122 +'RESTRICT'=123 +'RETURNING'=124 +'RIGHT'=125 +'ROLLBACK'=126 +'ROW'=127 +'ROWS'=128 +'SAVEPOINT'=129 +'SELECT'=130 +'SET'=131 +'TABLE'=132 +'TEMP'=133 +'TEMPORARY'=134 +'THEN'=135 +'TO'=136 +'TRANSACTION'=137 +'TRIGGER'=138 +'UNION'=139 +'UNIQUE'=140 +'UPDATE'=141 +'USING'=142 +'VACUUM'=143 +'VALUES'=144 +'VIEW'=145 +'VIRTUAL'=146 +'WHEN'=147 +'WHERE'=148 +'WITH'=149 +'WITHOUT'=150 +'FIRST_VALUE'=151 +'OVER'=152 +'PARTITION'=153 +'RANGE'=154 +'PRECEDING'=155 +'UNBOUNDED'=156 +'CURRENT'=157 +'FOLLOWING'=158 +'CUME_DIST'=159 +'DENSE_RANK'=160 +'LAG'=161 +'LAST_VALUE'=162 +'LEAD'=163 +'NTH_VALUE'=164 +'NTILE'=165 +'PERCENT_RANK'=166 +'RANK'=167 +'ROW_NUMBER'=168 +'GENERATED'=169 +'ALWAYS'=170 +'STORED'=171 +'TRUE'=172 +'FALSE'=173 +'WINDOW'=174 +'NULLS'=175 +'FIRST'=176 +'LAST'=177 +'FILTER'=178 +'GROUPS'=179 +'EXCLUDE'=180 +'TIES'=181 +'OTHERS'=182 +'DO'=183 +'NOTHING'=184 diff --git a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqlite_lexer.go b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqlite_lexer.go index 4e017db8ac..9cd0bb66de 100644 --- a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqlite_lexer.go +++ b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqlite_lexer.go @@ -1,1138 +1,1138 @@ -// Code generated from SQLiteLexer.g4 by ANTLR 4.12.0. DO NOT EDIT. - -package sqliteparser - -import ( - "fmt" - "sync" - "unicode" - - "github.com/antlr/antlr4/runtime/Go/antlr/v4" -) - -// Suppress unused import error -var _ = fmt.Printf -var _ = sync.Once{} -var _ = unicode.IsLetter - -type SQLiteLexer struct { - *antlr.BaseLexer - channelNames []string - modeNames []string - // TODO: EOF string -} - -var sqlitelexerLexerStaticData struct { - once sync.Once - serializedATN []int32 - channelNames []string - modeNames []string - literalNames []string - symbolicNames []string - ruleNames []string - predictionContextCache *antlr.PredictionContextCache - atn *antlr.ATN - decisionToDFA []*antlr.DFA -} - -func sqlitelexerLexerInit() { - staticData := &sqlitelexerLexerStaticData - staticData.channelNames = []string{ - "DEFAULT_TOKEN_CHANNEL", "HIDDEN", - } - staticData.modeNames = []string{ - "DEFAULT_MODE", - } - staticData.literalNames = []string{ - "", "';'", "'.'", "'('", "')'", "','", "'='", "'*'", "'+'", "'-'", "'~'", - "'||'", "'/'", "'%'", "'<<'", "'>>'", "'&'", "'|'", "'<'", "'<='", "'>'", - "'>='", "'=='", "'!='", "'<>'", "'ABORT'", "'ACTION'", "'ADD'", "'AFTER'", - "'ALL'", "'ALTER'", "'ANALYZE'", "'AND'", "'AS'", "'ASC'", "'ATTACH'", - "'AUTOINCREMENT'", "'BEFORE'", "'BEGIN'", "'BETWEEN'", "'BY'", "'CASCADE'", - "'CASE'", "'CAST'", "'CHECK'", "'COLLATE'", "'COLUMN'", "'COMMIT'", - "'CONFLICT'", "'CONSTRAINT'", "'CREATE'", "'CROSS'", "'CURRENT_DATE'", - "'CURRENT_TIME'", "'CURRENT_TIMESTAMP'", "'DATABASE'", "'DEFAULT'", - "'DEFERRABLE'", "'DEFERRED'", "'DELETE'", "'DESC'", "'DETACH'", "'DISTINCT'", - "'DROP'", "'EACH'", "'ELSE'", "'END'", "'ESCAPE'", "'EXCEPT'", "'EXCLUSIVE'", - "'EXISTS'", "'EXPLAIN'", "'FAIL'", "'FOR'", "'FOREIGN'", "'FROM'", "'FULL'", - "'GLOB'", "'GROUP'", "'HAVING'", "'IF'", "'IGNORE'", "'IMMEDIATE'", - "'IN'", "'INDEX'", "'INDEXED'", "'INITIALLY'", "'INNER'", "'INSERT'", - "'INSTEAD'", "'INTERSECT'", "'INTO'", "'IS'", "'ISNULL'", "'JOIN'", - "'KEY'", "'LEFT'", "'LIKE'", "'LIMIT'", 
"'MATCH'", "'NATURAL'", "'NO'", - "'NOT'", "'NOTNULL'", "'NULL'", "'OF'", "'OFFSET'", "'ON'", "'OR'", - "'ORDER'", "'OUTER'", "'PLAN'", "'PRAGMA'", "'PRIMARY'", "'QUERY'", - "'RAISE'", "'RECURSIVE'", "'REFERENCES'", "'REGEXP'", "'REINDEX'", "'RELEASE'", - "'RENAME'", "'REPLACE'", "'RESTRICT'", "'RETURNING'", "'RIGHT'", "'ROLLBACK'", - "'ROW'", "'ROWS'", "'SAVEPOINT'", "'SELECT'", "'SET'", "'TABLE'", "'TEMP'", - "'TEMPORARY'", "'THEN'", "'TO'", "'TRANSACTION'", "'TRIGGER'", "'UNION'", - "'UNIQUE'", "'UPDATE'", "'USING'", "'VACUUM'", "'VALUES'", "'VIEW'", - "'VIRTUAL'", "'WHEN'", "'WHERE'", "'WITH'", "'WITHOUT'", "'FIRST_VALUE'", - "'OVER'", "'PARTITION'", "'RANGE'", "'PRECEDING'", "'UNBOUNDED'", "'CURRENT'", - "'FOLLOWING'", "'CUME_DIST'", "'DENSE_RANK'", "'LAG'", "'LAST_VALUE'", - "'LEAD'", "'NTH_VALUE'", "'NTILE'", "'PERCENT_RANK'", "'RANK'", "'ROW_NUMBER'", - "'GENERATED'", "'ALWAYS'", "'STORED'", "'TRUE'", "'FALSE'", "'WINDOW'", - "'NULLS'", "'FIRST'", "'LAST'", "'FILTER'", "'GROUPS'", "'EXCLUDE'", - "'TIES'", "'OTHERS'", "'DO'", "'NOTHING'", - } - staticData.symbolicNames = []string{ - "", "SCOL", "DOT", "OPEN_PAR", "CLOSE_PAR", "COMMA", "ASSIGN", "STAR", - "PLUS", "MINUS", "TILDE", "PIPE2", "DIV", "MOD", "LT2", "GT2", "AMP", - "PIPE", "LT", "LT_EQ", "GT", "GT_EQ", "EQ", "NOT_EQ1", "NOT_EQ2", "ABORT_", - "ACTION_", "ADD_", "AFTER_", "ALL_", "ALTER_", "ANALYZE_", "AND_", "AS_", - "ASC_", "ATTACH_", "AUTOINCREMENT_", "BEFORE_", "BEGIN_", "BETWEEN_", - "BY_", "CASCADE_", "CASE_", "CAST_", "CHECK_", "COLLATE_", "COLUMN_", - "COMMIT_", "CONFLICT_", "CONSTRAINT_", "CREATE_", "CROSS_", "CURRENT_DATE_", - "CURRENT_TIME_", "CURRENT_TIMESTAMP_", "DATABASE_", "DEFAULT_", "DEFERRABLE_", - "DEFERRED_", "DELETE_", "DESC_", "DETACH_", "DISTINCT_", "DROP_", "EACH_", - "ELSE_", "END_", "ESCAPE_", "EXCEPT_", "EXCLUSIVE_", "EXISTS_", "EXPLAIN_", - "FAIL_", "FOR_", "FOREIGN_", "FROM_", "FULL_", "GLOB_", "GROUP_", "HAVING_", - "IF_", "IGNORE_", "IMMEDIATE_", "IN_", "INDEX_", "INDEXED_", "INITIALLY_", - "INNER_", "INSERT_", "INSTEAD_", "INTERSECT_", "INTO_", "IS_", "ISNULL_", - "JOIN_", "KEY_", "LEFT_", "LIKE_", "LIMIT_", "MATCH_", "NATURAL_", "NO_", - "NOT_", "NOTNULL_", "NULL_", "OF_", "OFFSET_", "ON_", "OR_", "ORDER_", - "OUTER_", "PLAN_", "PRAGMA_", "PRIMARY_", "QUERY_", "RAISE_", "RECURSIVE_", - "REFERENCES_", "REGEXP_", "REINDEX_", "RELEASE_", "RENAME_", "REPLACE_", - "RESTRICT_", "RETURNING_", "RIGHT_", "ROLLBACK_", "ROW_", "ROWS_", "SAVEPOINT_", - "SELECT_", "SET_", "TABLE_", "TEMP_", "TEMPORARY_", "THEN_", "TO_", - "TRANSACTION_", "TRIGGER_", "UNION_", "UNIQUE_", "UPDATE_", "USING_", - "VACUUM_", "VALUES_", "VIEW_", "VIRTUAL_", "WHEN_", "WHERE_", "WITH_", - "WITHOUT_", "FIRST_VALUE_", "OVER_", "PARTITION_", "RANGE_", "PRECEDING_", - "UNBOUNDED_", "CURRENT_", "FOLLOWING_", "CUME_DIST_", "DENSE_RANK_", - "LAG_", "LAST_VALUE_", "LEAD_", "NTH_VALUE_", "NTILE_", "PERCENT_RANK_", - "RANK_", "ROW_NUMBER_", "GENERATED_", "ALWAYS_", "STORED_", "TRUE_", - "FALSE_", "WINDOW_", "NULLS_", "FIRST_", "LAST_", "FILTER_", "GROUPS_", - "EXCLUDE_", "TIES_", "OTHERS_", "DO_", "NOTHING_", "IDENTIFIER", "NUMERIC_LITERAL", - "BIND_PARAMETER", "STRING_LITERAL", "BLOB_LITERAL", "SINGLE_LINE_COMMENT", - "MULTILINE_COMMENT", "SPACES", "UNEXPECTED_CHAR", - } - staticData.ruleNames = []string{ - "SCOL", "DOT", "OPEN_PAR", "CLOSE_PAR", "COMMA", "ASSIGN", "STAR", "PLUS", - "MINUS", "TILDE", "PIPE2", "DIV", "MOD", "LT2", "GT2", "AMP", "PIPE", - "LT", "LT_EQ", "GT", "GT_EQ", "EQ", "NOT_EQ1", "NOT_EQ2", "ABORT_", - "ACTION_", 
"ADD_", "AFTER_", "ALL_", "ALTER_", "ANALYZE_", "AND_", "AS_", - "ASC_", "ATTACH_", "AUTOINCREMENT_", "BEFORE_", "BEGIN_", "BETWEEN_", - "BY_", "CASCADE_", "CASE_", "CAST_", "CHECK_", "COLLATE_", "COLUMN_", - "COMMIT_", "CONFLICT_", "CONSTRAINT_", "CREATE_", "CROSS_", "CURRENT_DATE_", - "CURRENT_TIME_", "CURRENT_TIMESTAMP_", "DATABASE_", "DEFAULT_", "DEFERRABLE_", - "DEFERRED_", "DELETE_", "DESC_", "DETACH_", "DISTINCT_", "DROP_", "EACH_", - "ELSE_", "END_", "ESCAPE_", "EXCEPT_", "EXCLUSIVE_", "EXISTS_", "EXPLAIN_", - "FAIL_", "FOR_", "FOREIGN_", "FROM_", "FULL_", "GLOB_", "GROUP_", "HAVING_", - "IF_", "IGNORE_", "IMMEDIATE_", "IN_", "INDEX_", "INDEXED_", "INITIALLY_", - "INNER_", "INSERT_", "INSTEAD_", "INTERSECT_", "INTO_", "IS_", "ISNULL_", - "JOIN_", "KEY_", "LEFT_", "LIKE_", "LIMIT_", "MATCH_", "NATURAL_", "NO_", - "NOT_", "NOTNULL_", "NULL_", "OF_", "OFFSET_", "ON_", "OR_", "ORDER_", - "OUTER_", "PLAN_", "PRAGMA_", "PRIMARY_", "QUERY_", "RAISE_", "RECURSIVE_", - "REFERENCES_", "REGEXP_", "REINDEX_", "RELEASE_", "RENAME_", "REPLACE_", - "RESTRICT_", "RETURNING_", "RIGHT_", "ROLLBACK_", "ROW_", "ROWS_", "SAVEPOINT_", - "SELECT_", "SET_", "TABLE_", "TEMP_", "TEMPORARY_", "THEN_", "TO_", - "TRANSACTION_", "TRIGGER_", "UNION_", "UNIQUE_", "UPDATE_", "USING_", - "VACUUM_", "VALUES_", "VIEW_", "VIRTUAL_", "WHEN_", "WHERE_", "WITH_", - "WITHOUT_", "FIRST_VALUE_", "OVER_", "PARTITION_", "RANGE_", "PRECEDING_", - "UNBOUNDED_", "CURRENT_", "FOLLOWING_", "CUME_DIST_", "DENSE_RANK_", - "LAG_", "LAST_VALUE_", "LEAD_", "NTH_VALUE_", "NTILE_", "PERCENT_RANK_", - "RANK_", "ROW_NUMBER_", "GENERATED_", "ALWAYS_", "STORED_", "TRUE_", - "FALSE_", "WINDOW_", "NULLS_", "FIRST_", "LAST_", "FILTER_", "GROUPS_", - "EXCLUDE_", "TIES_", "OTHERS_", "DO_", "NOTHING_", "IDENTIFIER", "NUMERIC_LITERAL", - "BIND_PARAMETER", "STRING_LITERAL", "BLOB_LITERAL", "SINGLE_LINE_COMMENT", - "MULTILINE_COMMENT", "SPACES", "UNEXPECTED_CHAR", "HEX_DIGIT", "DIGIT", - } - staticData.predictionContextCache = antlr.NewPredictionContextCache() - staticData.serializedATN = []int32{ - 4, 0, 193, 1704, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, - 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, - 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, - 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, - 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, - 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, - 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, - 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, - 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, - 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, - 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, - 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, - 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, - 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, - 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, - 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, - 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, - 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, - 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, - 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 
102, 7, 102, 2, 103, 7, 103, - 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, - 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, - 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, - 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, - 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, - 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, - 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, - 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, - 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, - 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, - 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, - 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, - 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, - 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, - 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, - 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, - 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, - 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, - 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, - 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, - 2, 194, 7, 194, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, - 4, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, - 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, - 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, - 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, - 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, - 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, - 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, - 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, - 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 33, 1, - 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 35, - 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, - 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, - 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, - 38, 1, 38, 1, 38, 1, 39, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, - 1, 40, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 1, 41, 1, 41, 1, 42, 1, 42, 1, - 42, 1, 42, 1, 42, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 44, 1, 44, - 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 45, 1, 45, 1, - 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 47, - 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, - 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, - 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, - 50, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, - 1, 51, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, - 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, - 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, - 53, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, - 1, 
54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, - 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 57, - 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, - 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, - 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 1, - 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 63, - 1, 63, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 65, 1, - 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 67, - 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 68, 1, 68, 1, - 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, - 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, - 71, 1, 71, 1, 71, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, - 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 74, 1, - 74, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 76, 1, 76, 1, 76, 1, 76, 1, 76, - 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 78, 1, 78, 1, 78, 1, 78, 1, - 78, 1, 78, 1, 78, 1, 79, 1, 79, 1, 79, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, - 1, 80, 1, 80, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, - 81, 1, 81, 1, 82, 1, 82, 1, 82, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, - 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 85, 1, 85, 1, - 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 86, 1, 86, 1, 86, - 1, 86, 1, 86, 1, 86, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, - 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, - 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, - 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 92, 1, 92, - 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, - 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 96, 1, 96, 1, 97, - 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 98, 1, - 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, - 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, - 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, - 103, 1, 104, 1, 104, 1, 104, 1, 105, 1, 105, 1, 105, 1, 105, 1, 105, 1, - 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 107, 1, 107, 1, 107, 1, 108, 1, - 108, 1, 108, 1, 108, 1, 108, 1, 108, 1, 109, 1, 109, 1, 109, 1, 109, 1, - 109, 1, 109, 1, 110, 1, 110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, - 111, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, - 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 113, 1, 113, 1, - 113, 1, 114, 1, 114, 1, 114, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, - 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, - 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, - 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, - 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, - 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, - 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, - 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, - 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, - 123, 1, 123, 1, 123, 1, 123, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, - 124, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, - 125, 1, 126, 1, 126, 1, 126, 1, 126, 1, 
127, 1, 127, 1, 127, 1, 127, 1, - 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, - 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, - 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, - 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, - 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, - 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, - 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, - 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, - 138, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, - 139, 1, 139, 1, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 140, 1, 140, 1, - 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, - 142, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, - 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, - 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 146, 1, 146, 1, - 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, - 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, - 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, - 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, - 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, - 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, - 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, - 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, - 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, - 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, - 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, - 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, - 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, - 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, - 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, - 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, - 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, - 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, - 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, - 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 168, 1, - 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, - 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, - 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 171, 1, 171, 1, - 171, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 1, 173, 1, 173, 1, - 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 174, 1, 174, 1, 174, 1, 174, 1, - 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 176, 1, - 176, 1, 176, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, - 177, 1, 177, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, - 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 180, 1, - 180, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, - 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, - 183, 1, 183, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 184, 5, 184, 1562, - 8, 184, 10, 184, 12, 184, 1565, 9, 184, 1, 184, 1, 184, 1, 184, 1, 184, - 1, 184, 5, 184, 1572, 8, 184, 10, 184, 
12, 184, 1575, 9, 184, 1, 184, 1, - 184, 1, 184, 5, 184, 1580, 8, 184, 10, 184, 12, 184, 1583, 9, 184, 1, 184, - 1, 184, 1, 184, 5, 184, 1588, 8, 184, 10, 184, 12, 184, 1591, 9, 184, 3, - 184, 1593, 8, 184, 1, 185, 4, 185, 1596, 8, 185, 11, 185, 12, 185, 1597, - 1, 185, 1, 185, 5, 185, 1602, 8, 185, 10, 185, 12, 185, 1605, 9, 185, 3, - 185, 1607, 8, 185, 1, 185, 1, 185, 4, 185, 1611, 8, 185, 11, 185, 12, 185, - 1612, 3, 185, 1615, 8, 185, 1, 185, 1, 185, 3, 185, 1619, 8, 185, 1, 185, - 4, 185, 1622, 8, 185, 11, 185, 12, 185, 1623, 3, 185, 1626, 8, 185, 1, - 185, 1, 185, 1, 185, 1, 185, 4, 185, 1632, 8, 185, 11, 185, 12, 185, 1633, - 3, 185, 1636, 8, 185, 1, 186, 1, 186, 5, 186, 1640, 8, 186, 10, 186, 12, - 186, 1643, 9, 186, 1, 186, 1, 186, 3, 186, 1647, 8, 186, 1, 187, 1, 187, - 1, 187, 1, 187, 5, 187, 1653, 8, 187, 10, 187, 12, 187, 1656, 9, 187, 1, - 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 5, - 189, 1667, 8, 189, 10, 189, 12, 189, 1670, 9, 189, 1, 189, 3, 189, 1673, - 8, 189, 1, 189, 1, 189, 3, 189, 1677, 8, 189, 1, 189, 1, 189, 1, 190, 1, - 190, 1, 190, 1, 190, 5, 190, 1685, 8, 190, 10, 190, 12, 190, 1688, 9, 190, - 1, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 191, 1, 191, - 1, 192, 1, 192, 1, 193, 1, 193, 1, 194, 1, 194, 1, 1686, 0, 195, 1, 1, - 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, - 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, - 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 29, 59, - 30, 61, 31, 63, 32, 65, 33, 67, 34, 69, 35, 71, 36, 73, 37, 75, 38, 77, - 39, 79, 40, 81, 41, 83, 42, 85, 43, 87, 44, 89, 45, 91, 46, 93, 47, 95, - 48, 97, 49, 99, 50, 101, 51, 103, 52, 105, 53, 107, 54, 109, 55, 111, 56, - 113, 57, 115, 58, 117, 59, 119, 60, 121, 61, 123, 62, 125, 63, 127, 64, - 129, 65, 131, 66, 133, 67, 135, 68, 137, 69, 139, 70, 141, 71, 143, 72, - 145, 73, 147, 74, 149, 75, 151, 76, 153, 77, 155, 78, 157, 79, 159, 80, - 161, 81, 163, 82, 165, 83, 167, 84, 169, 85, 171, 86, 173, 87, 175, 88, - 177, 89, 179, 90, 181, 91, 183, 92, 185, 93, 187, 94, 189, 95, 191, 96, - 193, 97, 195, 98, 197, 99, 199, 100, 201, 101, 203, 102, 205, 103, 207, - 104, 209, 105, 211, 106, 213, 107, 215, 108, 217, 109, 219, 110, 221, 111, - 223, 112, 225, 113, 227, 114, 229, 115, 231, 116, 233, 117, 235, 118, 237, - 119, 239, 120, 241, 121, 243, 122, 245, 123, 247, 124, 249, 125, 251, 126, - 253, 127, 255, 128, 257, 129, 259, 130, 261, 131, 263, 132, 265, 133, 267, - 134, 269, 135, 271, 136, 273, 137, 275, 138, 277, 139, 279, 140, 281, 141, - 283, 142, 285, 143, 287, 144, 289, 145, 291, 146, 293, 147, 295, 148, 297, - 149, 299, 150, 301, 151, 303, 152, 305, 153, 307, 154, 309, 155, 311, 156, - 313, 157, 315, 158, 317, 159, 319, 160, 321, 161, 323, 162, 325, 163, 327, - 164, 329, 165, 331, 166, 333, 167, 335, 168, 337, 169, 339, 170, 341, 171, - 343, 172, 345, 173, 347, 174, 349, 175, 351, 176, 353, 177, 355, 178, 357, - 179, 359, 180, 361, 181, 363, 182, 365, 183, 367, 184, 369, 185, 371, 186, - 373, 187, 375, 188, 377, 189, 379, 190, 381, 191, 383, 192, 385, 193, 387, - 0, 389, 0, 1, 0, 38, 2, 0, 65, 65, 97, 97, 2, 0, 66, 66, 98, 98, 2, 0, - 79, 79, 111, 111, 2, 0, 82, 82, 114, 114, 2, 0, 84, 84, 116, 116, 2, 0, - 67, 67, 99, 99, 2, 0, 73, 73, 105, 105, 2, 0, 78, 78, 110, 110, 2, 0, 68, - 68, 100, 100, 2, 0, 70, 70, 102, 102, 2, 0, 69, 69, 101, 101, 2, 0, 76, - 76, 108, 108, 2, 0, 89, 89, 121, 121, 2, 0, 90, 90, 122, 122, 2, 0, 83, - 83, 115, 115, 2, 0, 72, 72, 104, 104, 2, 0, 85, 85, 
117, 117, 2, 0, 77, - 77, 109, 109, 2, 0, 71, 71, 103, 103, 2, 0, 87, 87, 119, 119, 2, 0, 75, - 75, 107, 107, 2, 0, 80, 80, 112, 112, 2, 0, 88, 88, 120, 120, 2, 0, 86, - 86, 118, 118, 2, 0, 74, 74, 106, 106, 2, 0, 81, 81, 113, 113, 1, 0, 34, - 34, 1, 0, 96, 96, 1, 0, 93, 93, 3, 0, 65, 90, 95, 95, 97, 122, 4, 0, 48, - 57, 65, 90, 95, 95, 97, 122, 2, 0, 43, 43, 45, 45, 3, 0, 36, 36, 58, 58, - 64, 64, 1, 0, 39, 39, 2, 0, 10, 10, 13, 13, 3, 0, 9, 11, 13, 13, 32, 32, - 3, 0, 48, 57, 65, 70, 97, 102, 1, 0, 48, 57, 1728, 0, 1, 1, 0, 0, 0, 0, - 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, - 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, - 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, - 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, - 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, - 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, - 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, - 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0, 63, 1, 0, 0, 0, - 0, 65, 1, 0, 0, 0, 0, 67, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 0, 71, 1, 0, 0, - 0, 0, 73, 1, 0, 0, 0, 0, 75, 1, 0, 0, 0, 0, 77, 1, 0, 0, 0, 0, 79, 1, 0, - 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, - 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 0, 95, - 1, 0, 0, 0, 0, 97, 1, 0, 0, 0, 0, 99, 1, 0, 0, 0, 0, 101, 1, 0, 0, 0, 0, - 103, 1, 0, 0, 0, 0, 105, 1, 0, 0, 0, 0, 107, 1, 0, 0, 0, 0, 109, 1, 0, - 0, 0, 0, 111, 1, 0, 0, 0, 0, 113, 1, 0, 0, 0, 0, 115, 1, 0, 0, 0, 0, 117, - 1, 0, 0, 0, 0, 119, 1, 0, 0, 0, 0, 121, 1, 0, 0, 0, 0, 123, 1, 0, 0, 0, - 0, 125, 1, 0, 0, 0, 0, 127, 1, 0, 0, 0, 0, 129, 1, 0, 0, 0, 0, 131, 1, - 0, 0, 0, 0, 133, 1, 0, 0, 0, 0, 135, 1, 0, 0, 0, 0, 137, 1, 0, 0, 0, 0, - 139, 1, 0, 0, 0, 0, 141, 1, 0, 0, 0, 0, 143, 1, 0, 0, 0, 0, 145, 1, 0, - 0, 0, 0, 147, 1, 0, 0, 0, 0, 149, 1, 0, 0, 0, 0, 151, 1, 0, 0, 0, 0, 153, - 1, 0, 0, 0, 0, 155, 1, 0, 0, 0, 0, 157, 1, 0, 0, 0, 0, 159, 1, 0, 0, 0, - 0, 161, 1, 0, 0, 0, 0, 163, 1, 0, 0, 0, 0, 165, 1, 0, 0, 0, 0, 167, 1, - 0, 0, 0, 0, 169, 1, 0, 0, 0, 0, 171, 1, 0, 0, 0, 0, 173, 1, 0, 0, 0, 0, - 175, 1, 0, 0, 0, 0, 177, 1, 0, 0, 0, 0, 179, 1, 0, 0, 0, 0, 181, 1, 0, - 0, 0, 0, 183, 1, 0, 0, 0, 0, 185, 1, 0, 0, 0, 0, 187, 1, 0, 0, 0, 0, 189, - 1, 0, 0, 0, 0, 191, 1, 0, 0, 0, 0, 193, 1, 0, 0, 0, 0, 195, 1, 0, 0, 0, - 0, 197, 1, 0, 0, 0, 0, 199, 1, 0, 0, 0, 0, 201, 1, 0, 0, 0, 0, 203, 1, - 0, 0, 0, 0, 205, 1, 0, 0, 0, 0, 207, 1, 0, 0, 0, 0, 209, 1, 0, 0, 0, 0, - 211, 1, 0, 0, 0, 0, 213, 1, 0, 0, 0, 0, 215, 1, 0, 0, 0, 0, 217, 1, 0, - 0, 0, 0, 219, 1, 0, 0, 0, 0, 221, 1, 0, 0, 0, 0, 223, 1, 0, 0, 0, 0, 225, - 1, 0, 0, 0, 0, 227, 1, 0, 0, 0, 0, 229, 1, 0, 0, 0, 0, 231, 1, 0, 0, 0, - 0, 233, 1, 0, 0, 0, 0, 235, 1, 0, 0, 0, 0, 237, 1, 0, 0, 0, 0, 239, 1, - 0, 0, 0, 0, 241, 1, 0, 0, 0, 0, 243, 1, 0, 0, 0, 0, 245, 1, 0, 0, 0, 0, - 247, 1, 0, 0, 0, 0, 249, 1, 0, 0, 0, 0, 251, 1, 0, 0, 0, 0, 253, 1, 0, - 0, 0, 0, 255, 1, 0, 0, 0, 0, 257, 1, 0, 0, 0, 0, 259, 1, 0, 0, 0, 0, 261, - 1, 0, 0, 0, 0, 263, 1, 0, 0, 0, 0, 265, 1, 0, 0, 0, 0, 267, 1, 0, 0, 0, - 0, 269, 1, 0, 0, 0, 0, 271, 1, 0, 0, 0, 0, 273, 1, 0, 0, 0, 0, 275, 1, - 0, 0, 0, 0, 277, 1, 0, 0, 0, 0, 279, 1, 0, 0, 0, 0, 281, 1, 0, 0, 0, 0, - 283, 1, 0, 0, 0, 0, 285, 1, 0, 0, 0, 0, 287, 1, 0, 0, 0, 0, 289, 1, 0, - 0, 0, 0, 291, 1, 0, 0, 0, 0, 293, 1, 0, 0, 0, 0, 295, 1, 0, 0, 0, 0, 297, - 1, 0, 0, 0, 0, 299, 1, 0, 0, 0, 0, 301, 
1, 0, 0, 0, 0, 303, 1, 0, 0, 0, - 0, 305, 1, 0, 0, 0, 0, 307, 1, 0, 0, 0, 0, 309, 1, 0, 0, 0, 0, 311, 1, - 0, 0, 0, 0, 313, 1, 0, 0, 0, 0, 315, 1, 0, 0, 0, 0, 317, 1, 0, 0, 0, 0, - 319, 1, 0, 0, 0, 0, 321, 1, 0, 0, 0, 0, 323, 1, 0, 0, 0, 0, 325, 1, 0, - 0, 0, 0, 327, 1, 0, 0, 0, 0, 329, 1, 0, 0, 0, 0, 331, 1, 0, 0, 0, 0, 333, - 1, 0, 0, 0, 0, 335, 1, 0, 0, 0, 0, 337, 1, 0, 0, 0, 0, 339, 1, 0, 0, 0, - 0, 341, 1, 0, 0, 0, 0, 343, 1, 0, 0, 0, 0, 345, 1, 0, 0, 0, 0, 347, 1, - 0, 0, 0, 0, 349, 1, 0, 0, 0, 0, 351, 1, 0, 0, 0, 0, 353, 1, 0, 0, 0, 0, - 355, 1, 0, 0, 0, 0, 357, 1, 0, 0, 0, 0, 359, 1, 0, 0, 0, 0, 361, 1, 0, - 0, 0, 0, 363, 1, 0, 0, 0, 0, 365, 1, 0, 0, 0, 0, 367, 1, 0, 0, 0, 0, 369, - 1, 0, 0, 0, 0, 371, 1, 0, 0, 0, 0, 373, 1, 0, 0, 0, 0, 375, 1, 0, 0, 0, - 0, 377, 1, 0, 0, 0, 0, 379, 1, 0, 0, 0, 0, 381, 1, 0, 0, 0, 0, 383, 1, - 0, 0, 0, 0, 385, 1, 0, 0, 0, 1, 391, 1, 0, 0, 0, 3, 393, 1, 0, 0, 0, 5, - 395, 1, 0, 0, 0, 7, 397, 1, 0, 0, 0, 9, 399, 1, 0, 0, 0, 11, 401, 1, 0, - 0, 0, 13, 403, 1, 0, 0, 0, 15, 405, 1, 0, 0, 0, 17, 407, 1, 0, 0, 0, 19, - 409, 1, 0, 0, 0, 21, 411, 1, 0, 0, 0, 23, 414, 1, 0, 0, 0, 25, 416, 1, - 0, 0, 0, 27, 418, 1, 0, 0, 0, 29, 421, 1, 0, 0, 0, 31, 424, 1, 0, 0, 0, - 33, 426, 1, 0, 0, 0, 35, 428, 1, 0, 0, 0, 37, 430, 1, 0, 0, 0, 39, 433, - 1, 0, 0, 0, 41, 435, 1, 0, 0, 0, 43, 438, 1, 0, 0, 0, 45, 441, 1, 0, 0, - 0, 47, 444, 1, 0, 0, 0, 49, 447, 1, 0, 0, 0, 51, 453, 1, 0, 0, 0, 53, 460, - 1, 0, 0, 0, 55, 464, 1, 0, 0, 0, 57, 470, 1, 0, 0, 0, 59, 474, 1, 0, 0, - 0, 61, 480, 1, 0, 0, 0, 63, 488, 1, 0, 0, 0, 65, 492, 1, 0, 0, 0, 67, 495, - 1, 0, 0, 0, 69, 499, 1, 0, 0, 0, 71, 506, 1, 0, 0, 0, 73, 520, 1, 0, 0, - 0, 75, 527, 1, 0, 0, 0, 77, 533, 1, 0, 0, 0, 79, 541, 1, 0, 0, 0, 81, 544, - 1, 0, 0, 0, 83, 552, 1, 0, 0, 0, 85, 557, 1, 0, 0, 0, 87, 562, 1, 0, 0, - 0, 89, 568, 1, 0, 0, 0, 91, 576, 1, 0, 0, 0, 93, 583, 1, 0, 0, 0, 95, 590, - 1, 0, 0, 0, 97, 599, 1, 0, 0, 0, 99, 610, 1, 0, 0, 0, 101, 617, 1, 0, 0, - 0, 103, 623, 1, 0, 0, 0, 105, 636, 1, 0, 0, 0, 107, 649, 1, 0, 0, 0, 109, - 667, 1, 0, 0, 0, 111, 676, 1, 0, 0, 0, 113, 684, 1, 0, 0, 0, 115, 695, - 1, 0, 0, 0, 117, 704, 1, 0, 0, 0, 119, 711, 1, 0, 0, 0, 121, 716, 1, 0, - 0, 0, 123, 723, 1, 0, 0, 0, 125, 732, 1, 0, 0, 0, 127, 737, 1, 0, 0, 0, - 129, 742, 1, 0, 0, 0, 131, 747, 1, 0, 0, 0, 133, 751, 1, 0, 0, 0, 135, - 758, 1, 0, 0, 0, 137, 765, 1, 0, 0, 0, 139, 775, 1, 0, 0, 0, 141, 782, - 1, 0, 0, 0, 143, 790, 1, 0, 0, 0, 145, 795, 1, 0, 0, 0, 147, 799, 1, 0, - 0, 0, 149, 807, 1, 0, 0, 0, 151, 812, 1, 0, 0, 0, 153, 817, 1, 0, 0, 0, - 155, 822, 1, 0, 0, 0, 157, 828, 1, 0, 0, 0, 159, 835, 1, 0, 0, 0, 161, - 838, 1, 0, 0, 0, 163, 845, 1, 0, 0, 0, 165, 855, 1, 0, 0, 0, 167, 858, - 1, 0, 0, 0, 169, 864, 1, 0, 0, 0, 171, 872, 1, 0, 0, 0, 173, 882, 1, 0, - 0, 0, 175, 888, 1, 0, 0, 0, 177, 895, 1, 0, 0, 0, 179, 903, 1, 0, 0, 0, - 181, 913, 1, 0, 0, 0, 183, 918, 1, 0, 0, 0, 185, 921, 1, 0, 0, 0, 187, - 928, 1, 0, 0, 0, 189, 933, 1, 0, 0, 0, 191, 937, 1, 0, 0, 0, 193, 942, - 1, 0, 0, 0, 195, 947, 1, 0, 0, 0, 197, 953, 1, 0, 0, 0, 199, 959, 1, 0, - 0, 0, 201, 967, 1, 0, 0, 0, 203, 970, 1, 0, 0, 0, 205, 974, 1, 0, 0, 0, - 207, 982, 1, 0, 0, 0, 209, 987, 1, 0, 0, 0, 211, 990, 1, 0, 0, 0, 213, - 997, 1, 0, 0, 0, 215, 1000, 1, 0, 0, 0, 217, 1003, 1, 0, 0, 0, 219, 1009, - 1, 0, 0, 0, 221, 1015, 1, 0, 0, 0, 223, 1020, 1, 0, 0, 0, 225, 1027, 1, - 0, 0, 0, 227, 1035, 1, 0, 0, 0, 229, 1041, 1, 0, 0, 0, 231, 1047, 1, 0, - 0, 0, 233, 1057, 1, 0, 0, 0, 235, 1068, 1, 0, 0, 0, 237, 1075, 1, 0, 0, - 0, 239, 1083, 1, 0, 0, 0, 241, 
1091, 1, 0, 0, 0, 243, 1098, 1, 0, 0, 0, - 245, 1106, 1, 0, 0, 0, 247, 1115, 1, 0, 0, 0, 249, 1125, 1, 0, 0, 0, 251, - 1131, 1, 0, 0, 0, 253, 1140, 1, 0, 0, 0, 255, 1144, 1, 0, 0, 0, 257, 1149, - 1, 0, 0, 0, 259, 1159, 1, 0, 0, 0, 261, 1166, 1, 0, 0, 0, 263, 1170, 1, - 0, 0, 0, 265, 1176, 1, 0, 0, 0, 267, 1181, 1, 0, 0, 0, 269, 1191, 1, 0, - 0, 0, 271, 1196, 1, 0, 0, 0, 273, 1199, 1, 0, 0, 0, 275, 1211, 1, 0, 0, - 0, 277, 1219, 1, 0, 0, 0, 279, 1225, 1, 0, 0, 0, 281, 1232, 1, 0, 0, 0, - 283, 1239, 1, 0, 0, 0, 285, 1245, 1, 0, 0, 0, 287, 1252, 1, 0, 0, 0, 289, - 1259, 1, 0, 0, 0, 291, 1264, 1, 0, 0, 0, 293, 1272, 1, 0, 0, 0, 295, 1277, - 1, 0, 0, 0, 297, 1283, 1, 0, 0, 0, 299, 1288, 1, 0, 0, 0, 301, 1296, 1, - 0, 0, 0, 303, 1308, 1, 0, 0, 0, 305, 1313, 1, 0, 0, 0, 307, 1323, 1, 0, - 0, 0, 309, 1329, 1, 0, 0, 0, 311, 1339, 1, 0, 0, 0, 313, 1349, 1, 0, 0, - 0, 315, 1357, 1, 0, 0, 0, 317, 1367, 1, 0, 0, 0, 319, 1377, 1, 0, 0, 0, - 321, 1388, 1, 0, 0, 0, 323, 1392, 1, 0, 0, 0, 325, 1403, 1, 0, 0, 0, 327, - 1408, 1, 0, 0, 0, 329, 1418, 1, 0, 0, 0, 331, 1424, 1, 0, 0, 0, 333, 1437, - 1, 0, 0, 0, 335, 1442, 1, 0, 0, 0, 337, 1453, 1, 0, 0, 0, 339, 1463, 1, - 0, 0, 0, 341, 1470, 1, 0, 0, 0, 343, 1477, 1, 0, 0, 0, 345, 1482, 1, 0, - 0, 0, 347, 1488, 1, 0, 0, 0, 349, 1495, 1, 0, 0, 0, 351, 1501, 1, 0, 0, - 0, 353, 1507, 1, 0, 0, 0, 355, 1512, 1, 0, 0, 0, 357, 1519, 1, 0, 0, 0, - 359, 1526, 1, 0, 0, 0, 361, 1534, 1, 0, 0, 0, 363, 1539, 1, 0, 0, 0, 365, - 1546, 1, 0, 0, 0, 367, 1549, 1, 0, 0, 0, 369, 1592, 1, 0, 0, 0, 371, 1635, - 1, 0, 0, 0, 373, 1646, 1, 0, 0, 0, 375, 1648, 1, 0, 0, 0, 377, 1659, 1, - 0, 0, 0, 379, 1662, 1, 0, 0, 0, 381, 1680, 1, 0, 0, 0, 383, 1694, 1, 0, - 0, 0, 385, 1698, 1, 0, 0, 0, 387, 1700, 1, 0, 0, 0, 389, 1702, 1, 0, 0, - 0, 391, 392, 5, 59, 0, 0, 392, 2, 1, 0, 0, 0, 393, 394, 5, 46, 0, 0, 394, - 4, 1, 0, 0, 0, 395, 396, 5, 40, 0, 0, 396, 6, 1, 0, 0, 0, 397, 398, 5, - 41, 0, 0, 398, 8, 1, 0, 0, 0, 399, 400, 5, 44, 0, 0, 400, 10, 1, 0, 0, - 0, 401, 402, 5, 61, 0, 0, 402, 12, 1, 0, 0, 0, 403, 404, 5, 42, 0, 0, 404, - 14, 1, 0, 0, 0, 405, 406, 5, 43, 0, 0, 406, 16, 1, 0, 0, 0, 407, 408, 5, - 45, 0, 0, 408, 18, 1, 0, 0, 0, 409, 410, 5, 126, 0, 0, 410, 20, 1, 0, 0, - 0, 411, 412, 5, 124, 0, 0, 412, 413, 5, 124, 0, 0, 413, 22, 1, 0, 0, 0, - 414, 415, 5, 47, 0, 0, 415, 24, 1, 0, 0, 0, 416, 417, 5, 37, 0, 0, 417, - 26, 1, 0, 0, 0, 418, 419, 5, 60, 0, 0, 419, 420, 5, 60, 0, 0, 420, 28, - 1, 0, 0, 0, 421, 422, 5, 62, 0, 0, 422, 423, 5, 62, 0, 0, 423, 30, 1, 0, - 0, 0, 424, 425, 5, 38, 0, 0, 425, 32, 1, 0, 0, 0, 426, 427, 5, 124, 0, - 0, 427, 34, 1, 0, 0, 0, 428, 429, 5, 60, 0, 0, 429, 36, 1, 0, 0, 0, 430, - 431, 5, 60, 0, 0, 431, 432, 5, 61, 0, 0, 432, 38, 1, 0, 0, 0, 433, 434, - 5, 62, 0, 0, 434, 40, 1, 0, 0, 0, 435, 436, 5, 62, 0, 0, 436, 437, 5, 61, - 0, 0, 437, 42, 1, 0, 0, 0, 438, 439, 5, 61, 0, 0, 439, 440, 5, 61, 0, 0, - 440, 44, 1, 0, 0, 0, 441, 442, 5, 33, 0, 0, 442, 443, 5, 61, 0, 0, 443, - 46, 1, 0, 0, 0, 444, 445, 5, 60, 0, 0, 445, 446, 5, 62, 0, 0, 446, 48, - 1, 0, 0, 0, 447, 448, 7, 0, 0, 0, 448, 449, 7, 1, 0, 0, 449, 450, 7, 2, - 0, 0, 450, 451, 7, 3, 0, 0, 451, 452, 7, 4, 0, 0, 452, 50, 1, 0, 0, 0, - 453, 454, 7, 0, 0, 0, 454, 455, 7, 5, 0, 0, 455, 456, 7, 4, 0, 0, 456, - 457, 7, 6, 0, 0, 457, 458, 7, 2, 0, 0, 458, 459, 7, 7, 0, 0, 459, 52, 1, - 0, 0, 0, 460, 461, 7, 0, 0, 0, 461, 462, 7, 8, 0, 0, 462, 463, 7, 8, 0, - 0, 463, 54, 1, 0, 0, 0, 464, 465, 7, 0, 0, 0, 465, 466, 7, 9, 0, 0, 466, - 467, 7, 4, 0, 0, 467, 468, 7, 10, 0, 0, 468, 469, 7, 3, 0, 0, 469, 56, - 1, 0, 
0, 0, 470, 471, 7, 0, 0, 0, 471, 472, 7, 11, 0, 0, 472, 473, 7, 11, - 0, 0, 473, 58, 1, 0, 0, 0, 474, 475, 7, 0, 0, 0, 475, 476, 7, 11, 0, 0, - 476, 477, 7, 4, 0, 0, 477, 478, 7, 10, 0, 0, 478, 479, 7, 3, 0, 0, 479, - 60, 1, 0, 0, 0, 480, 481, 7, 0, 0, 0, 481, 482, 7, 7, 0, 0, 482, 483, 7, - 0, 0, 0, 483, 484, 7, 11, 0, 0, 484, 485, 7, 12, 0, 0, 485, 486, 7, 13, - 0, 0, 486, 487, 7, 10, 0, 0, 487, 62, 1, 0, 0, 0, 488, 489, 7, 0, 0, 0, - 489, 490, 7, 7, 0, 0, 490, 491, 7, 8, 0, 0, 491, 64, 1, 0, 0, 0, 492, 493, - 7, 0, 0, 0, 493, 494, 7, 14, 0, 0, 494, 66, 1, 0, 0, 0, 495, 496, 7, 0, - 0, 0, 496, 497, 7, 14, 0, 0, 497, 498, 7, 5, 0, 0, 498, 68, 1, 0, 0, 0, - 499, 500, 7, 0, 0, 0, 500, 501, 7, 4, 0, 0, 501, 502, 7, 4, 0, 0, 502, - 503, 7, 0, 0, 0, 503, 504, 7, 5, 0, 0, 504, 505, 7, 15, 0, 0, 505, 70, - 1, 0, 0, 0, 506, 507, 7, 0, 0, 0, 507, 508, 7, 16, 0, 0, 508, 509, 7, 4, - 0, 0, 509, 510, 7, 2, 0, 0, 510, 511, 7, 6, 0, 0, 511, 512, 7, 7, 0, 0, - 512, 513, 7, 5, 0, 0, 513, 514, 7, 3, 0, 0, 514, 515, 7, 10, 0, 0, 515, - 516, 7, 17, 0, 0, 516, 517, 7, 10, 0, 0, 517, 518, 7, 7, 0, 0, 518, 519, - 7, 4, 0, 0, 519, 72, 1, 0, 0, 0, 520, 521, 7, 1, 0, 0, 521, 522, 7, 10, - 0, 0, 522, 523, 7, 9, 0, 0, 523, 524, 7, 2, 0, 0, 524, 525, 7, 3, 0, 0, - 525, 526, 7, 10, 0, 0, 526, 74, 1, 0, 0, 0, 527, 528, 7, 1, 0, 0, 528, - 529, 7, 10, 0, 0, 529, 530, 7, 18, 0, 0, 530, 531, 7, 6, 0, 0, 531, 532, - 7, 7, 0, 0, 532, 76, 1, 0, 0, 0, 533, 534, 7, 1, 0, 0, 534, 535, 7, 10, - 0, 0, 535, 536, 7, 4, 0, 0, 536, 537, 7, 19, 0, 0, 537, 538, 7, 10, 0, - 0, 538, 539, 7, 10, 0, 0, 539, 540, 7, 7, 0, 0, 540, 78, 1, 0, 0, 0, 541, - 542, 7, 1, 0, 0, 542, 543, 7, 12, 0, 0, 543, 80, 1, 0, 0, 0, 544, 545, - 7, 5, 0, 0, 545, 546, 7, 0, 0, 0, 546, 547, 7, 14, 0, 0, 547, 548, 7, 5, - 0, 0, 548, 549, 7, 0, 0, 0, 549, 550, 7, 8, 0, 0, 550, 551, 7, 10, 0, 0, - 551, 82, 1, 0, 0, 0, 552, 553, 7, 5, 0, 0, 553, 554, 7, 0, 0, 0, 554, 555, - 7, 14, 0, 0, 555, 556, 7, 10, 0, 0, 556, 84, 1, 0, 0, 0, 557, 558, 7, 5, - 0, 0, 558, 559, 7, 0, 0, 0, 559, 560, 7, 14, 0, 0, 560, 561, 7, 4, 0, 0, - 561, 86, 1, 0, 0, 0, 562, 563, 7, 5, 0, 0, 563, 564, 7, 15, 0, 0, 564, - 565, 7, 10, 0, 0, 565, 566, 7, 5, 0, 0, 566, 567, 7, 20, 0, 0, 567, 88, - 1, 0, 0, 0, 568, 569, 7, 5, 0, 0, 569, 570, 7, 2, 0, 0, 570, 571, 7, 11, - 0, 0, 571, 572, 7, 11, 0, 0, 572, 573, 7, 0, 0, 0, 573, 574, 7, 4, 0, 0, - 574, 575, 7, 10, 0, 0, 575, 90, 1, 0, 0, 0, 576, 577, 7, 5, 0, 0, 577, - 578, 7, 2, 0, 0, 578, 579, 7, 11, 0, 0, 579, 580, 7, 16, 0, 0, 580, 581, - 7, 17, 0, 0, 581, 582, 7, 7, 0, 0, 582, 92, 1, 0, 0, 0, 583, 584, 7, 5, - 0, 0, 584, 585, 7, 2, 0, 0, 585, 586, 7, 17, 0, 0, 586, 587, 7, 17, 0, - 0, 587, 588, 7, 6, 0, 0, 588, 589, 7, 4, 0, 0, 589, 94, 1, 0, 0, 0, 590, - 591, 7, 5, 0, 0, 591, 592, 7, 2, 0, 0, 592, 593, 7, 7, 0, 0, 593, 594, - 7, 9, 0, 0, 594, 595, 7, 11, 0, 0, 595, 596, 7, 6, 0, 0, 596, 597, 7, 5, - 0, 0, 597, 598, 7, 4, 0, 0, 598, 96, 1, 0, 0, 0, 599, 600, 7, 5, 0, 0, - 600, 601, 7, 2, 0, 0, 601, 602, 7, 7, 0, 0, 602, 603, 7, 14, 0, 0, 603, - 604, 7, 4, 0, 0, 604, 605, 7, 3, 0, 0, 605, 606, 7, 0, 0, 0, 606, 607, - 7, 6, 0, 0, 607, 608, 7, 7, 0, 0, 608, 609, 7, 4, 0, 0, 609, 98, 1, 0, - 0, 0, 610, 611, 7, 5, 0, 0, 611, 612, 7, 3, 0, 0, 612, 613, 7, 10, 0, 0, - 613, 614, 7, 0, 0, 0, 614, 615, 7, 4, 0, 0, 615, 616, 7, 10, 0, 0, 616, - 100, 1, 0, 0, 0, 617, 618, 7, 5, 0, 0, 618, 619, 7, 3, 0, 0, 619, 620, - 7, 2, 0, 0, 620, 621, 7, 14, 0, 0, 621, 622, 7, 14, 0, 0, 622, 102, 1, - 0, 0, 0, 623, 624, 7, 5, 0, 0, 624, 625, 7, 16, 0, 0, 625, 626, 7, 3, 0, 
- 0, 626, 627, 7, 3, 0, 0, 627, 628, 7, 10, 0, 0, 628, 629, 7, 7, 0, 0, 629, - 630, 7, 4, 0, 0, 630, 631, 5, 95, 0, 0, 631, 632, 7, 8, 0, 0, 632, 633, - 7, 0, 0, 0, 633, 634, 7, 4, 0, 0, 634, 635, 7, 10, 0, 0, 635, 104, 1, 0, - 0, 0, 636, 637, 7, 5, 0, 0, 637, 638, 7, 16, 0, 0, 638, 639, 7, 3, 0, 0, - 639, 640, 7, 3, 0, 0, 640, 641, 7, 10, 0, 0, 641, 642, 7, 7, 0, 0, 642, - 643, 7, 4, 0, 0, 643, 644, 5, 95, 0, 0, 644, 645, 7, 4, 0, 0, 645, 646, - 7, 6, 0, 0, 646, 647, 7, 17, 0, 0, 647, 648, 7, 10, 0, 0, 648, 106, 1, - 0, 0, 0, 649, 650, 7, 5, 0, 0, 650, 651, 7, 16, 0, 0, 651, 652, 7, 3, 0, - 0, 652, 653, 7, 3, 0, 0, 653, 654, 7, 10, 0, 0, 654, 655, 7, 7, 0, 0, 655, - 656, 7, 4, 0, 0, 656, 657, 5, 95, 0, 0, 657, 658, 7, 4, 0, 0, 658, 659, - 7, 6, 0, 0, 659, 660, 7, 17, 0, 0, 660, 661, 7, 10, 0, 0, 661, 662, 7, - 14, 0, 0, 662, 663, 7, 4, 0, 0, 663, 664, 7, 0, 0, 0, 664, 665, 7, 17, - 0, 0, 665, 666, 7, 21, 0, 0, 666, 108, 1, 0, 0, 0, 667, 668, 7, 8, 0, 0, - 668, 669, 7, 0, 0, 0, 669, 670, 7, 4, 0, 0, 670, 671, 7, 0, 0, 0, 671, - 672, 7, 1, 0, 0, 672, 673, 7, 0, 0, 0, 673, 674, 7, 14, 0, 0, 674, 675, - 7, 10, 0, 0, 675, 110, 1, 0, 0, 0, 676, 677, 7, 8, 0, 0, 677, 678, 7, 10, - 0, 0, 678, 679, 7, 9, 0, 0, 679, 680, 7, 0, 0, 0, 680, 681, 7, 16, 0, 0, - 681, 682, 7, 11, 0, 0, 682, 683, 7, 4, 0, 0, 683, 112, 1, 0, 0, 0, 684, - 685, 7, 8, 0, 0, 685, 686, 7, 10, 0, 0, 686, 687, 7, 9, 0, 0, 687, 688, - 7, 10, 0, 0, 688, 689, 7, 3, 0, 0, 689, 690, 7, 3, 0, 0, 690, 691, 7, 0, - 0, 0, 691, 692, 7, 1, 0, 0, 692, 693, 7, 11, 0, 0, 693, 694, 7, 10, 0, - 0, 694, 114, 1, 0, 0, 0, 695, 696, 7, 8, 0, 0, 696, 697, 7, 10, 0, 0, 697, - 698, 7, 9, 0, 0, 698, 699, 7, 10, 0, 0, 699, 700, 7, 3, 0, 0, 700, 701, - 7, 3, 0, 0, 701, 702, 7, 10, 0, 0, 702, 703, 7, 8, 0, 0, 703, 116, 1, 0, - 0, 0, 704, 705, 7, 8, 0, 0, 705, 706, 7, 10, 0, 0, 706, 707, 7, 11, 0, - 0, 707, 708, 7, 10, 0, 0, 708, 709, 7, 4, 0, 0, 709, 710, 7, 10, 0, 0, - 710, 118, 1, 0, 0, 0, 711, 712, 7, 8, 0, 0, 712, 713, 7, 10, 0, 0, 713, - 714, 7, 14, 0, 0, 714, 715, 7, 5, 0, 0, 715, 120, 1, 0, 0, 0, 716, 717, - 7, 8, 0, 0, 717, 718, 7, 10, 0, 0, 718, 719, 7, 4, 0, 0, 719, 720, 7, 0, - 0, 0, 720, 721, 7, 5, 0, 0, 721, 722, 7, 15, 0, 0, 722, 122, 1, 0, 0, 0, - 723, 724, 7, 8, 0, 0, 724, 725, 7, 6, 0, 0, 725, 726, 7, 14, 0, 0, 726, - 727, 7, 4, 0, 0, 727, 728, 7, 6, 0, 0, 728, 729, 7, 7, 0, 0, 729, 730, - 7, 5, 0, 0, 730, 731, 7, 4, 0, 0, 731, 124, 1, 0, 0, 0, 732, 733, 7, 8, - 0, 0, 733, 734, 7, 3, 0, 0, 734, 735, 7, 2, 0, 0, 735, 736, 7, 21, 0, 0, - 736, 126, 1, 0, 0, 0, 737, 738, 7, 10, 0, 0, 738, 739, 7, 0, 0, 0, 739, - 740, 7, 5, 0, 0, 740, 741, 7, 15, 0, 0, 741, 128, 1, 0, 0, 0, 742, 743, - 7, 10, 0, 0, 743, 744, 7, 11, 0, 0, 744, 745, 7, 14, 0, 0, 745, 746, 7, - 10, 0, 0, 746, 130, 1, 0, 0, 0, 747, 748, 7, 10, 0, 0, 748, 749, 7, 7, - 0, 0, 749, 750, 7, 8, 0, 0, 750, 132, 1, 0, 0, 0, 751, 752, 7, 10, 0, 0, - 752, 753, 7, 14, 0, 0, 753, 754, 7, 5, 0, 0, 754, 755, 7, 0, 0, 0, 755, - 756, 7, 21, 0, 0, 756, 757, 7, 10, 0, 0, 757, 134, 1, 0, 0, 0, 758, 759, - 7, 10, 0, 0, 759, 760, 7, 22, 0, 0, 760, 761, 7, 5, 0, 0, 761, 762, 7, - 10, 0, 0, 762, 763, 7, 21, 0, 0, 763, 764, 7, 4, 0, 0, 764, 136, 1, 0, - 0, 0, 765, 766, 7, 10, 0, 0, 766, 767, 7, 22, 0, 0, 767, 768, 7, 5, 0, - 0, 768, 769, 7, 11, 0, 0, 769, 770, 7, 16, 0, 0, 770, 771, 7, 14, 0, 0, - 771, 772, 7, 6, 0, 0, 772, 773, 7, 23, 0, 0, 773, 774, 7, 10, 0, 0, 774, - 138, 1, 0, 0, 0, 775, 776, 7, 10, 0, 0, 776, 777, 7, 22, 0, 0, 777, 778, - 7, 6, 0, 0, 778, 779, 7, 14, 0, 0, 779, 780, 7, 4, 0, 0, 780, 
781, 7, 14, - 0, 0, 781, 140, 1, 0, 0, 0, 782, 783, 7, 10, 0, 0, 783, 784, 7, 22, 0, - 0, 784, 785, 7, 21, 0, 0, 785, 786, 7, 11, 0, 0, 786, 787, 7, 0, 0, 0, - 787, 788, 7, 6, 0, 0, 788, 789, 7, 7, 0, 0, 789, 142, 1, 0, 0, 0, 790, - 791, 7, 9, 0, 0, 791, 792, 7, 0, 0, 0, 792, 793, 7, 6, 0, 0, 793, 794, - 7, 11, 0, 0, 794, 144, 1, 0, 0, 0, 795, 796, 7, 9, 0, 0, 796, 797, 7, 2, - 0, 0, 797, 798, 7, 3, 0, 0, 798, 146, 1, 0, 0, 0, 799, 800, 7, 9, 0, 0, - 800, 801, 7, 2, 0, 0, 801, 802, 7, 3, 0, 0, 802, 803, 7, 10, 0, 0, 803, - 804, 7, 6, 0, 0, 804, 805, 7, 18, 0, 0, 805, 806, 7, 7, 0, 0, 806, 148, - 1, 0, 0, 0, 807, 808, 7, 9, 0, 0, 808, 809, 7, 3, 0, 0, 809, 810, 7, 2, - 0, 0, 810, 811, 7, 17, 0, 0, 811, 150, 1, 0, 0, 0, 812, 813, 7, 9, 0, 0, - 813, 814, 7, 16, 0, 0, 814, 815, 7, 11, 0, 0, 815, 816, 7, 11, 0, 0, 816, - 152, 1, 0, 0, 0, 817, 818, 7, 18, 0, 0, 818, 819, 7, 11, 0, 0, 819, 820, - 7, 2, 0, 0, 820, 821, 7, 1, 0, 0, 821, 154, 1, 0, 0, 0, 822, 823, 7, 18, - 0, 0, 823, 824, 7, 3, 0, 0, 824, 825, 7, 2, 0, 0, 825, 826, 7, 16, 0, 0, - 826, 827, 7, 21, 0, 0, 827, 156, 1, 0, 0, 0, 828, 829, 7, 15, 0, 0, 829, - 830, 7, 0, 0, 0, 830, 831, 7, 23, 0, 0, 831, 832, 7, 6, 0, 0, 832, 833, - 7, 7, 0, 0, 833, 834, 7, 18, 0, 0, 834, 158, 1, 0, 0, 0, 835, 836, 7, 6, - 0, 0, 836, 837, 7, 9, 0, 0, 837, 160, 1, 0, 0, 0, 838, 839, 7, 6, 0, 0, - 839, 840, 7, 18, 0, 0, 840, 841, 7, 7, 0, 0, 841, 842, 7, 2, 0, 0, 842, - 843, 7, 3, 0, 0, 843, 844, 7, 10, 0, 0, 844, 162, 1, 0, 0, 0, 845, 846, - 7, 6, 0, 0, 846, 847, 7, 17, 0, 0, 847, 848, 7, 17, 0, 0, 848, 849, 7, - 10, 0, 0, 849, 850, 7, 8, 0, 0, 850, 851, 7, 6, 0, 0, 851, 852, 7, 0, 0, - 0, 852, 853, 7, 4, 0, 0, 853, 854, 7, 10, 0, 0, 854, 164, 1, 0, 0, 0, 855, - 856, 7, 6, 0, 0, 856, 857, 7, 7, 0, 0, 857, 166, 1, 0, 0, 0, 858, 859, - 7, 6, 0, 0, 859, 860, 7, 7, 0, 0, 860, 861, 7, 8, 0, 0, 861, 862, 7, 10, - 0, 0, 862, 863, 7, 22, 0, 0, 863, 168, 1, 0, 0, 0, 864, 865, 7, 6, 0, 0, - 865, 866, 7, 7, 0, 0, 866, 867, 7, 8, 0, 0, 867, 868, 7, 10, 0, 0, 868, - 869, 7, 22, 0, 0, 869, 870, 7, 10, 0, 0, 870, 871, 7, 8, 0, 0, 871, 170, - 1, 0, 0, 0, 872, 873, 7, 6, 0, 0, 873, 874, 7, 7, 0, 0, 874, 875, 7, 6, - 0, 0, 875, 876, 7, 4, 0, 0, 876, 877, 7, 6, 0, 0, 877, 878, 7, 0, 0, 0, - 878, 879, 7, 11, 0, 0, 879, 880, 7, 11, 0, 0, 880, 881, 7, 12, 0, 0, 881, - 172, 1, 0, 0, 0, 882, 883, 7, 6, 0, 0, 883, 884, 7, 7, 0, 0, 884, 885, - 7, 7, 0, 0, 885, 886, 7, 10, 0, 0, 886, 887, 7, 3, 0, 0, 887, 174, 1, 0, - 0, 0, 888, 889, 7, 6, 0, 0, 889, 890, 7, 7, 0, 0, 890, 891, 7, 14, 0, 0, - 891, 892, 7, 10, 0, 0, 892, 893, 7, 3, 0, 0, 893, 894, 7, 4, 0, 0, 894, - 176, 1, 0, 0, 0, 895, 896, 7, 6, 0, 0, 896, 897, 7, 7, 0, 0, 897, 898, - 7, 14, 0, 0, 898, 899, 7, 4, 0, 0, 899, 900, 7, 10, 0, 0, 900, 901, 7, - 0, 0, 0, 901, 902, 7, 8, 0, 0, 902, 178, 1, 0, 0, 0, 903, 904, 7, 6, 0, - 0, 904, 905, 7, 7, 0, 0, 905, 906, 7, 4, 0, 0, 906, 907, 7, 10, 0, 0, 907, - 908, 7, 3, 0, 0, 908, 909, 7, 14, 0, 0, 909, 910, 7, 10, 0, 0, 910, 911, - 7, 5, 0, 0, 911, 912, 7, 4, 0, 0, 912, 180, 1, 0, 0, 0, 913, 914, 7, 6, - 0, 0, 914, 915, 7, 7, 0, 0, 915, 916, 7, 4, 0, 0, 916, 917, 7, 2, 0, 0, - 917, 182, 1, 0, 0, 0, 918, 919, 7, 6, 0, 0, 919, 920, 7, 14, 0, 0, 920, - 184, 1, 0, 0, 0, 921, 922, 7, 6, 0, 0, 922, 923, 7, 14, 0, 0, 923, 924, - 7, 7, 0, 0, 924, 925, 7, 16, 0, 0, 925, 926, 7, 11, 0, 0, 926, 927, 7, - 11, 0, 0, 927, 186, 1, 0, 0, 0, 928, 929, 7, 24, 0, 0, 929, 930, 7, 2, - 0, 0, 930, 931, 7, 6, 0, 0, 931, 932, 7, 7, 0, 0, 932, 188, 1, 0, 0, 0, - 933, 934, 7, 20, 0, 0, 934, 935, 7, 10, 0, 0, 
935, 936, 7, 12, 0, 0, 936, - 190, 1, 0, 0, 0, 937, 938, 7, 11, 0, 0, 938, 939, 7, 10, 0, 0, 939, 940, - 7, 9, 0, 0, 940, 941, 7, 4, 0, 0, 941, 192, 1, 0, 0, 0, 942, 943, 7, 11, - 0, 0, 943, 944, 7, 6, 0, 0, 944, 945, 7, 20, 0, 0, 945, 946, 7, 10, 0, - 0, 946, 194, 1, 0, 0, 0, 947, 948, 7, 11, 0, 0, 948, 949, 7, 6, 0, 0, 949, - 950, 7, 17, 0, 0, 950, 951, 7, 6, 0, 0, 951, 952, 7, 4, 0, 0, 952, 196, - 1, 0, 0, 0, 953, 954, 7, 17, 0, 0, 954, 955, 7, 0, 0, 0, 955, 956, 7, 4, - 0, 0, 956, 957, 7, 5, 0, 0, 957, 958, 7, 15, 0, 0, 958, 198, 1, 0, 0, 0, - 959, 960, 7, 7, 0, 0, 960, 961, 7, 0, 0, 0, 961, 962, 7, 4, 0, 0, 962, - 963, 7, 16, 0, 0, 963, 964, 7, 3, 0, 0, 964, 965, 7, 0, 0, 0, 965, 966, - 7, 11, 0, 0, 966, 200, 1, 0, 0, 0, 967, 968, 7, 7, 0, 0, 968, 969, 7, 2, - 0, 0, 969, 202, 1, 0, 0, 0, 970, 971, 7, 7, 0, 0, 971, 972, 7, 2, 0, 0, - 972, 973, 7, 4, 0, 0, 973, 204, 1, 0, 0, 0, 974, 975, 7, 7, 0, 0, 975, - 976, 7, 2, 0, 0, 976, 977, 7, 4, 0, 0, 977, 978, 7, 7, 0, 0, 978, 979, - 7, 16, 0, 0, 979, 980, 7, 11, 0, 0, 980, 981, 7, 11, 0, 0, 981, 206, 1, - 0, 0, 0, 982, 983, 7, 7, 0, 0, 983, 984, 7, 16, 0, 0, 984, 985, 7, 11, - 0, 0, 985, 986, 7, 11, 0, 0, 986, 208, 1, 0, 0, 0, 987, 988, 7, 2, 0, 0, - 988, 989, 7, 9, 0, 0, 989, 210, 1, 0, 0, 0, 990, 991, 7, 2, 0, 0, 991, - 992, 7, 9, 0, 0, 992, 993, 7, 9, 0, 0, 993, 994, 7, 14, 0, 0, 994, 995, - 7, 10, 0, 0, 995, 996, 7, 4, 0, 0, 996, 212, 1, 0, 0, 0, 997, 998, 7, 2, - 0, 0, 998, 999, 7, 7, 0, 0, 999, 214, 1, 0, 0, 0, 1000, 1001, 7, 2, 0, - 0, 1001, 1002, 7, 3, 0, 0, 1002, 216, 1, 0, 0, 0, 1003, 1004, 7, 2, 0, - 0, 1004, 1005, 7, 3, 0, 0, 1005, 1006, 7, 8, 0, 0, 1006, 1007, 7, 10, 0, - 0, 1007, 1008, 7, 3, 0, 0, 1008, 218, 1, 0, 0, 0, 1009, 1010, 7, 2, 0, - 0, 1010, 1011, 7, 16, 0, 0, 1011, 1012, 7, 4, 0, 0, 1012, 1013, 7, 10, - 0, 0, 1013, 1014, 7, 3, 0, 0, 1014, 220, 1, 0, 0, 0, 1015, 1016, 7, 21, - 0, 0, 1016, 1017, 7, 11, 0, 0, 1017, 1018, 7, 0, 0, 0, 1018, 1019, 7, 7, - 0, 0, 1019, 222, 1, 0, 0, 0, 1020, 1021, 7, 21, 0, 0, 1021, 1022, 7, 3, - 0, 0, 1022, 1023, 7, 0, 0, 0, 1023, 1024, 7, 18, 0, 0, 1024, 1025, 7, 17, - 0, 0, 1025, 1026, 7, 0, 0, 0, 1026, 224, 1, 0, 0, 0, 1027, 1028, 7, 21, - 0, 0, 1028, 1029, 7, 3, 0, 0, 1029, 1030, 7, 6, 0, 0, 1030, 1031, 7, 17, - 0, 0, 1031, 1032, 7, 0, 0, 0, 1032, 1033, 7, 3, 0, 0, 1033, 1034, 7, 12, - 0, 0, 1034, 226, 1, 0, 0, 0, 1035, 1036, 7, 25, 0, 0, 1036, 1037, 7, 16, - 0, 0, 1037, 1038, 7, 10, 0, 0, 1038, 1039, 7, 3, 0, 0, 1039, 1040, 7, 12, - 0, 0, 1040, 228, 1, 0, 0, 0, 1041, 1042, 7, 3, 0, 0, 1042, 1043, 7, 0, - 0, 0, 1043, 1044, 7, 6, 0, 0, 1044, 1045, 7, 14, 0, 0, 1045, 1046, 7, 10, - 0, 0, 1046, 230, 1, 0, 0, 0, 1047, 1048, 7, 3, 0, 0, 1048, 1049, 7, 10, - 0, 0, 1049, 1050, 7, 5, 0, 0, 1050, 1051, 7, 16, 0, 0, 1051, 1052, 7, 3, - 0, 0, 1052, 1053, 7, 14, 0, 0, 1053, 1054, 7, 6, 0, 0, 1054, 1055, 7, 23, - 0, 0, 1055, 1056, 7, 10, 0, 0, 1056, 232, 1, 0, 0, 0, 1057, 1058, 7, 3, - 0, 0, 1058, 1059, 7, 10, 0, 0, 1059, 1060, 7, 9, 0, 0, 1060, 1061, 7, 10, - 0, 0, 1061, 1062, 7, 3, 0, 0, 1062, 1063, 7, 10, 0, 0, 1063, 1064, 7, 7, - 0, 0, 1064, 1065, 7, 5, 0, 0, 1065, 1066, 7, 10, 0, 0, 1066, 1067, 7, 14, - 0, 0, 1067, 234, 1, 0, 0, 0, 1068, 1069, 7, 3, 0, 0, 1069, 1070, 7, 10, - 0, 0, 1070, 1071, 7, 18, 0, 0, 1071, 1072, 7, 10, 0, 0, 1072, 1073, 7, - 22, 0, 0, 1073, 1074, 7, 21, 0, 0, 1074, 236, 1, 0, 0, 0, 1075, 1076, 7, - 3, 0, 0, 1076, 1077, 7, 10, 0, 0, 1077, 1078, 7, 6, 0, 0, 1078, 1079, 7, - 7, 0, 0, 1079, 1080, 7, 8, 0, 0, 1080, 1081, 7, 10, 0, 0, 1081, 1082, 7, - 22, 0, 0, 1082, 238, 1, 0, 
0, 0, 1083, 1084, 7, 3, 0, 0, 1084, 1085, 7, - 10, 0, 0, 1085, 1086, 7, 11, 0, 0, 1086, 1087, 7, 10, 0, 0, 1087, 1088, - 7, 0, 0, 0, 1088, 1089, 7, 14, 0, 0, 1089, 1090, 7, 10, 0, 0, 1090, 240, - 1, 0, 0, 0, 1091, 1092, 7, 3, 0, 0, 1092, 1093, 7, 10, 0, 0, 1093, 1094, - 7, 7, 0, 0, 1094, 1095, 7, 0, 0, 0, 1095, 1096, 7, 17, 0, 0, 1096, 1097, - 7, 10, 0, 0, 1097, 242, 1, 0, 0, 0, 1098, 1099, 7, 3, 0, 0, 1099, 1100, - 7, 10, 0, 0, 1100, 1101, 7, 21, 0, 0, 1101, 1102, 7, 11, 0, 0, 1102, 1103, - 7, 0, 0, 0, 1103, 1104, 7, 5, 0, 0, 1104, 1105, 7, 10, 0, 0, 1105, 244, - 1, 0, 0, 0, 1106, 1107, 7, 3, 0, 0, 1107, 1108, 7, 10, 0, 0, 1108, 1109, - 7, 14, 0, 0, 1109, 1110, 7, 4, 0, 0, 1110, 1111, 7, 3, 0, 0, 1111, 1112, - 7, 6, 0, 0, 1112, 1113, 7, 5, 0, 0, 1113, 1114, 7, 4, 0, 0, 1114, 246, - 1, 0, 0, 0, 1115, 1116, 7, 3, 0, 0, 1116, 1117, 7, 10, 0, 0, 1117, 1118, - 7, 4, 0, 0, 1118, 1119, 7, 16, 0, 0, 1119, 1120, 7, 3, 0, 0, 1120, 1121, - 7, 7, 0, 0, 1121, 1122, 7, 6, 0, 0, 1122, 1123, 7, 7, 0, 0, 1123, 1124, - 7, 18, 0, 0, 1124, 248, 1, 0, 0, 0, 1125, 1126, 7, 3, 0, 0, 1126, 1127, - 7, 6, 0, 0, 1127, 1128, 7, 18, 0, 0, 1128, 1129, 7, 15, 0, 0, 1129, 1130, - 7, 4, 0, 0, 1130, 250, 1, 0, 0, 0, 1131, 1132, 7, 3, 0, 0, 1132, 1133, - 7, 2, 0, 0, 1133, 1134, 7, 11, 0, 0, 1134, 1135, 7, 11, 0, 0, 1135, 1136, - 7, 1, 0, 0, 1136, 1137, 7, 0, 0, 0, 1137, 1138, 7, 5, 0, 0, 1138, 1139, - 7, 20, 0, 0, 1139, 252, 1, 0, 0, 0, 1140, 1141, 7, 3, 0, 0, 1141, 1142, - 7, 2, 0, 0, 1142, 1143, 7, 19, 0, 0, 1143, 254, 1, 0, 0, 0, 1144, 1145, - 7, 3, 0, 0, 1145, 1146, 7, 2, 0, 0, 1146, 1147, 7, 19, 0, 0, 1147, 1148, - 7, 14, 0, 0, 1148, 256, 1, 0, 0, 0, 1149, 1150, 7, 14, 0, 0, 1150, 1151, - 7, 0, 0, 0, 1151, 1152, 7, 23, 0, 0, 1152, 1153, 7, 10, 0, 0, 1153, 1154, - 7, 21, 0, 0, 1154, 1155, 7, 2, 0, 0, 1155, 1156, 7, 6, 0, 0, 1156, 1157, - 7, 7, 0, 0, 1157, 1158, 7, 4, 0, 0, 1158, 258, 1, 0, 0, 0, 1159, 1160, - 7, 14, 0, 0, 1160, 1161, 7, 10, 0, 0, 1161, 1162, 7, 11, 0, 0, 1162, 1163, - 7, 10, 0, 0, 1163, 1164, 7, 5, 0, 0, 1164, 1165, 7, 4, 0, 0, 1165, 260, - 1, 0, 0, 0, 1166, 1167, 7, 14, 0, 0, 1167, 1168, 7, 10, 0, 0, 1168, 1169, - 7, 4, 0, 0, 1169, 262, 1, 0, 0, 0, 1170, 1171, 7, 4, 0, 0, 1171, 1172, - 7, 0, 0, 0, 1172, 1173, 7, 1, 0, 0, 1173, 1174, 7, 11, 0, 0, 1174, 1175, - 7, 10, 0, 0, 1175, 264, 1, 0, 0, 0, 1176, 1177, 7, 4, 0, 0, 1177, 1178, - 7, 10, 0, 0, 1178, 1179, 7, 17, 0, 0, 1179, 1180, 7, 21, 0, 0, 1180, 266, - 1, 0, 0, 0, 1181, 1182, 7, 4, 0, 0, 1182, 1183, 7, 10, 0, 0, 1183, 1184, - 7, 17, 0, 0, 1184, 1185, 7, 21, 0, 0, 1185, 1186, 7, 2, 0, 0, 1186, 1187, - 7, 3, 0, 0, 1187, 1188, 7, 0, 0, 0, 1188, 1189, 7, 3, 0, 0, 1189, 1190, - 7, 12, 0, 0, 1190, 268, 1, 0, 0, 0, 1191, 1192, 7, 4, 0, 0, 1192, 1193, - 7, 15, 0, 0, 1193, 1194, 7, 10, 0, 0, 1194, 1195, 7, 7, 0, 0, 1195, 270, - 1, 0, 0, 0, 1196, 1197, 7, 4, 0, 0, 1197, 1198, 7, 2, 0, 0, 1198, 272, - 1, 0, 0, 0, 1199, 1200, 7, 4, 0, 0, 1200, 1201, 7, 3, 0, 0, 1201, 1202, - 7, 0, 0, 0, 1202, 1203, 7, 7, 0, 0, 1203, 1204, 7, 14, 0, 0, 1204, 1205, - 7, 0, 0, 0, 1205, 1206, 7, 5, 0, 0, 1206, 1207, 7, 4, 0, 0, 1207, 1208, - 7, 6, 0, 0, 1208, 1209, 7, 2, 0, 0, 1209, 1210, 7, 7, 0, 0, 1210, 274, - 1, 0, 0, 0, 1211, 1212, 7, 4, 0, 0, 1212, 1213, 7, 3, 0, 0, 1213, 1214, - 7, 6, 0, 0, 1214, 1215, 7, 18, 0, 0, 1215, 1216, 7, 18, 0, 0, 1216, 1217, - 7, 10, 0, 0, 1217, 1218, 7, 3, 0, 0, 1218, 276, 1, 0, 0, 0, 1219, 1220, - 7, 16, 0, 0, 1220, 1221, 7, 7, 0, 0, 1221, 1222, 7, 6, 0, 0, 1222, 1223, - 7, 2, 0, 0, 1223, 1224, 7, 7, 0, 0, 1224, 278, 1, 0, 0, 0, 1225, 1226, - 
7, 16, 0, 0, 1226, 1227, 7, 7, 0, 0, 1227, 1228, 7, 6, 0, 0, 1228, 1229, - 7, 25, 0, 0, 1229, 1230, 7, 16, 0, 0, 1230, 1231, 7, 10, 0, 0, 1231, 280, - 1, 0, 0, 0, 1232, 1233, 7, 16, 0, 0, 1233, 1234, 7, 21, 0, 0, 1234, 1235, - 7, 8, 0, 0, 1235, 1236, 7, 0, 0, 0, 1236, 1237, 7, 4, 0, 0, 1237, 1238, - 7, 10, 0, 0, 1238, 282, 1, 0, 0, 0, 1239, 1240, 7, 16, 0, 0, 1240, 1241, - 7, 14, 0, 0, 1241, 1242, 7, 6, 0, 0, 1242, 1243, 7, 7, 0, 0, 1243, 1244, - 7, 18, 0, 0, 1244, 284, 1, 0, 0, 0, 1245, 1246, 7, 23, 0, 0, 1246, 1247, - 7, 0, 0, 0, 1247, 1248, 7, 5, 0, 0, 1248, 1249, 7, 16, 0, 0, 1249, 1250, - 7, 16, 0, 0, 1250, 1251, 7, 17, 0, 0, 1251, 286, 1, 0, 0, 0, 1252, 1253, - 7, 23, 0, 0, 1253, 1254, 7, 0, 0, 0, 1254, 1255, 7, 11, 0, 0, 1255, 1256, - 7, 16, 0, 0, 1256, 1257, 7, 10, 0, 0, 1257, 1258, 7, 14, 0, 0, 1258, 288, - 1, 0, 0, 0, 1259, 1260, 7, 23, 0, 0, 1260, 1261, 7, 6, 0, 0, 1261, 1262, - 7, 10, 0, 0, 1262, 1263, 7, 19, 0, 0, 1263, 290, 1, 0, 0, 0, 1264, 1265, - 7, 23, 0, 0, 1265, 1266, 7, 6, 0, 0, 1266, 1267, 7, 3, 0, 0, 1267, 1268, - 7, 4, 0, 0, 1268, 1269, 7, 16, 0, 0, 1269, 1270, 7, 0, 0, 0, 1270, 1271, - 7, 11, 0, 0, 1271, 292, 1, 0, 0, 0, 1272, 1273, 7, 19, 0, 0, 1273, 1274, - 7, 15, 0, 0, 1274, 1275, 7, 10, 0, 0, 1275, 1276, 7, 7, 0, 0, 1276, 294, - 1, 0, 0, 0, 1277, 1278, 7, 19, 0, 0, 1278, 1279, 7, 15, 0, 0, 1279, 1280, - 7, 10, 0, 0, 1280, 1281, 7, 3, 0, 0, 1281, 1282, 7, 10, 0, 0, 1282, 296, - 1, 0, 0, 0, 1283, 1284, 7, 19, 0, 0, 1284, 1285, 7, 6, 0, 0, 1285, 1286, - 7, 4, 0, 0, 1286, 1287, 7, 15, 0, 0, 1287, 298, 1, 0, 0, 0, 1288, 1289, - 7, 19, 0, 0, 1289, 1290, 7, 6, 0, 0, 1290, 1291, 7, 4, 0, 0, 1291, 1292, - 7, 15, 0, 0, 1292, 1293, 7, 2, 0, 0, 1293, 1294, 7, 16, 0, 0, 1294, 1295, - 7, 4, 0, 0, 1295, 300, 1, 0, 0, 0, 1296, 1297, 7, 9, 0, 0, 1297, 1298, - 7, 6, 0, 0, 1298, 1299, 7, 3, 0, 0, 1299, 1300, 7, 14, 0, 0, 1300, 1301, - 7, 4, 0, 0, 1301, 1302, 5, 95, 0, 0, 1302, 1303, 7, 23, 0, 0, 1303, 1304, - 7, 0, 0, 0, 1304, 1305, 7, 11, 0, 0, 1305, 1306, 7, 16, 0, 0, 1306, 1307, - 7, 10, 0, 0, 1307, 302, 1, 0, 0, 0, 1308, 1309, 7, 2, 0, 0, 1309, 1310, - 7, 23, 0, 0, 1310, 1311, 7, 10, 0, 0, 1311, 1312, 7, 3, 0, 0, 1312, 304, - 1, 0, 0, 0, 1313, 1314, 7, 21, 0, 0, 1314, 1315, 7, 0, 0, 0, 1315, 1316, - 7, 3, 0, 0, 1316, 1317, 7, 4, 0, 0, 1317, 1318, 7, 6, 0, 0, 1318, 1319, - 7, 4, 0, 0, 1319, 1320, 7, 6, 0, 0, 1320, 1321, 7, 2, 0, 0, 1321, 1322, - 7, 7, 0, 0, 1322, 306, 1, 0, 0, 0, 1323, 1324, 7, 3, 0, 0, 1324, 1325, - 7, 0, 0, 0, 1325, 1326, 7, 7, 0, 0, 1326, 1327, 7, 18, 0, 0, 1327, 1328, - 7, 10, 0, 0, 1328, 308, 1, 0, 0, 0, 1329, 1330, 7, 21, 0, 0, 1330, 1331, - 7, 3, 0, 0, 1331, 1332, 7, 10, 0, 0, 1332, 1333, 7, 5, 0, 0, 1333, 1334, - 7, 10, 0, 0, 1334, 1335, 7, 8, 0, 0, 1335, 1336, 7, 6, 0, 0, 1336, 1337, - 7, 7, 0, 0, 1337, 1338, 7, 18, 0, 0, 1338, 310, 1, 0, 0, 0, 1339, 1340, - 7, 16, 0, 0, 1340, 1341, 7, 7, 0, 0, 1341, 1342, 7, 1, 0, 0, 1342, 1343, - 7, 2, 0, 0, 1343, 1344, 7, 16, 0, 0, 1344, 1345, 7, 7, 0, 0, 1345, 1346, - 7, 8, 0, 0, 1346, 1347, 7, 10, 0, 0, 1347, 1348, 7, 8, 0, 0, 1348, 312, - 1, 0, 0, 0, 1349, 1350, 7, 5, 0, 0, 1350, 1351, 7, 16, 0, 0, 1351, 1352, - 7, 3, 0, 0, 1352, 1353, 7, 3, 0, 0, 1353, 1354, 7, 10, 0, 0, 1354, 1355, - 7, 7, 0, 0, 1355, 1356, 7, 4, 0, 0, 1356, 314, 1, 0, 0, 0, 1357, 1358, - 7, 9, 0, 0, 1358, 1359, 7, 2, 0, 0, 1359, 1360, 7, 11, 0, 0, 1360, 1361, - 7, 11, 0, 0, 1361, 1362, 7, 2, 0, 0, 1362, 1363, 7, 19, 0, 0, 1363, 1364, - 7, 6, 0, 0, 1364, 1365, 7, 7, 0, 0, 1365, 1366, 7, 18, 0, 0, 1366, 316, - 1, 0, 0, 0, 1367, 1368, 7, 5, 0, 
0, 1368, 1369, 7, 16, 0, 0, 1369, 1370, - 7, 17, 0, 0, 1370, 1371, 7, 10, 0, 0, 1371, 1372, 5, 95, 0, 0, 1372, 1373, - 7, 8, 0, 0, 1373, 1374, 7, 6, 0, 0, 1374, 1375, 7, 14, 0, 0, 1375, 1376, - 7, 4, 0, 0, 1376, 318, 1, 0, 0, 0, 1377, 1378, 7, 8, 0, 0, 1378, 1379, - 7, 10, 0, 0, 1379, 1380, 7, 7, 0, 0, 1380, 1381, 7, 14, 0, 0, 1381, 1382, - 7, 10, 0, 0, 1382, 1383, 5, 95, 0, 0, 1383, 1384, 7, 3, 0, 0, 1384, 1385, - 7, 0, 0, 0, 1385, 1386, 7, 7, 0, 0, 1386, 1387, 7, 20, 0, 0, 1387, 320, - 1, 0, 0, 0, 1388, 1389, 7, 11, 0, 0, 1389, 1390, 7, 0, 0, 0, 1390, 1391, - 7, 18, 0, 0, 1391, 322, 1, 0, 0, 0, 1392, 1393, 7, 11, 0, 0, 1393, 1394, - 7, 0, 0, 0, 1394, 1395, 7, 14, 0, 0, 1395, 1396, 7, 4, 0, 0, 1396, 1397, - 5, 95, 0, 0, 1397, 1398, 7, 23, 0, 0, 1398, 1399, 7, 0, 0, 0, 1399, 1400, - 7, 11, 0, 0, 1400, 1401, 7, 16, 0, 0, 1401, 1402, 7, 10, 0, 0, 1402, 324, - 1, 0, 0, 0, 1403, 1404, 7, 11, 0, 0, 1404, 1405, 7, 10, 0, 0, 1405, 1406, - 7, 0, 0, 0, 1406, 1407, 7, 8, 0, 0, 1407, 326, 1, 0, 0, 0, 1408, 1409, - 7, 7, 0, 0, 1409, 1410, 7, 4, 0, 0, 1410, 1411, 7, 15, 0, 0, 1411, 1412, - 5, 95, 0, 0, 1412, 1413, 7, 23, 0, 0, 1413, 1414, 7, 0, 0, 0, 1414, 1415, - 7, 11, 0, 0, 1415, 1416, 7, 16, 0, 0, 1416, 1417, 7, 10, 0, 0, 1417, 328, - 1, 0, 0, 0, 1418, 1419, 7, 7, 0, 0, 1419, 1420, 7, 4, 0, 0, 1420, 1421, - 7, 6, 0, 0, 1421, 1422, 7, 11, 0, 0, 1422, 1423, 7, 10, 0, 0, 1423, 330, - 1, 0, 0, 0, 1424, 1425, 7, 21, 0, 0, 1425, 1426, 7, 10, 0, 0, 1426, 1427, - 7, 3, 0, 0, 1427, 1428, 7, 5, 0, 0, 1428, 1429, 7, 10, 0, 0, 1429, 1430, - 7, 7, 0, 0, 1430, 1431, 7, 4, 0, 0, 1431, 1432, 5, 95, 0, 0, 1432, 1433, - 7, 3, 0, 0, 1433, 1434, 7, 0, 0, 0, 1434, 1435, 7, 7, 0, 0, 1435, 1436, - 7, 20, 0, 0, 1436, 332, 1, 0, 0, 0, 1437, 1438, 7, 3, 0, 0, 1438, 1439, - 7, 0, 0, 0, 1439, 1440, 7, 7, 0, 0, 1440, 1441, 7, 20, 0, 0, 1441, 334, - 1, 0, 0, 0, 1442, 1443, 7, 3, 0, 0, 1443, 1444, 7, 2, 0, 0, 1444, 1445, - 7, 19, 0, 0, 1445, 1446, 5, 95, 0, 0, 1446, 1447, 7, 7, 0, 0, 1447, 1448, - 7, 16, 0, 0, 1448, 1449, 7, 17, 0, 0, 1449, 1450, 7, 1, 0, 0, 1450, 1451, - 7, 10, 0, 0, 1451, 1452, 7, 3, 0, 0, 1452, 336, 1, 0, 0, 0, 1453, 1454, - 7, 18, 0, 0, 1454, 1455, 7, 10, 0, 0, 1455, 1456, 7, 7, 0, 0, 1456, 1457, - 7, 10, 0, 0, 1457, 1458, 7, 3, 0, 0, 1458, 1459, 7, 0, 0, 0, 1459, 1460, - 7, 4, 0, 0, 1460, 1461, 7, 10, 0, 0, 1461, 1462, 7, 8, 0, 0, 1462, 338, - 1, 0, 0, 0, 1463, 1464, 7, 0, 0, 0, 1464, 1465, 7, 11, 0, 0, 1465, 1466, - 7, 19, 0, 0, 1466, 1467, 7, 0, 0, 0, 1467, 1468, 7, 12, 0, 0, 1468, 1469, - 7, 14, 0, 0, 1469, 340, 1, 0, 0, 0, 1470, 1471, 7, 14, 0, 0, 1471, 1472, - 7, 4, 0, 0, 1472, 1473, 7, 2, 0, 0, 1473, 1474, 7, 3, 0, 0, 1474, 1475, - 7, 10, 0, 0, 1475, 1476, 7, 8, 0, 0, 1476, 342, 1, 0, 0, 0, 1477, 1478, - 7, 4, 0, 0, 1478, 1479, 7, 3, 0, 0, 1479, 1480, 7, 16, 0, 0, 1480, 1481, - 7, 10, 0, 0, 1481, 344, 1, 0, 0, 0, 1482, 1483, 7, 9, 0, 0, 1483, 1484, - 7, 0, 0, 0, 1484, 1485, 7, 11, 0, 0, 1485, 1486, 7, 14, 0, 0, 1486, 1487, - 7, 10, 0, 0, 1487, 346, 1, 0, 0, 0, 1488, 1489, 7, 19, 0, 0, 1489, 1490, - 7, 6, 0, 0, 1490, 1491, 7, 7, 0, 0, 1491, 1492, 7, 8, 0, 0, 1492, 1493, - 7, 2, 0, 0, 1493, 1494, 7, 19, 0, 0, 1494, 348, 1, 0, 0, 0, 1495, 1496, - 7, 7, 0, 0, 1496, 1497, 7, 16, 0, 0, 1497, 1498, 7, 11, 0, 0, 1498, 1499, - 7, 11, 0, 0, 1499, 1500, 7, 14, 0, 0, 1500, 350, 1, 0, 0, 0, 1501, 1502, - 7, 9, 0, 0, 1502, 1503, 7, 6, 0, 0, 1503, 1504, 7, 3, 0, 0, 1504, 1505, - 7, 14, 0, 0, 1505, 1506, 7, 4, 0, 0, 1506, 352, 1, 0, 0, 0, 1507, 1508, - 7, 11, 0, 0, 1508, 1509, 7, 0, 0, 0, 1509, 1510, 7, 14, 0, 0, 
1510, 1511, - 7, 4, 0, 0, 1511, 354, 1, 0, 0, 0, 1512, 1513, 7, 9, 0, 0, 1513, 1514, - 7, 6, 0, 0, 1514, 1515, 7, 11, 0, 0, 1515, 1516, 7, 4, 0, 0, 1516, 1517, - 7, 10, 0, 0, 1517, 1518, 7, 3, 0, 0, 1518, 356, 1, 0, 0, 0, 1519, 1520, - 7, 18, 0, 0, 1520, 1521, 7, 3, 0, 0, 1521, 1522, 7, 2, 0, 0, 1522, 1523, - 7, 16, 0, 0, 1523, 1524, 7, 21, 0, 0, 1524, 1525, 7, 14, 0, 0, 1525, 358, - 1, 0, 0, 0, 1526, 1527, 7, 10, 0, 0, 1527, 1528, 7, 22, 0, 0, 1528, 1529, - 7, 5, 0, 0, 1529, 1530, 7, 11, 0, 0, 1530, 1531, 7, 16, 0, 0, 1531, 1532, - 7, 8, 0, 0, 1532, 1533, 7, 10, 0, 0, 1533, 360, 1, 0, 0, 0, 1534, 1535, - 7, 4, 0, 0, 1535, 1536, 7, 6, 0, 0, 1536, 1537, 7, 10, 0, 0, 1537, 1538, - 7, 14, 0, 0, 1538, 362, 1, 0, 0, 0, 1539, 1540, 7, 2, 0, 0, 1540, 1541, - 7, 4, 0, 0, 1541, 1542, 7, 15, 0, 0, 1542, 1543, 7, 10, 0, 0, 1543, 1544, - 7, 3, 0, 0, 1544, 1545, 7, 14, 0, 0, 1545, 364, 1, 0, 0, 0, 1546, 1547, - 7, 8, 0, 0, 1547, 1548, 7, 2, 0, 0, 1548, 366, 1, 0, 0, 0, 1549, 1550, - 7, 7, 0, 0, 1550, 1551, 7, 2, 0, 0, 1551, 1552, 7, 4, 0, 0, 1552, 1553, - 7, 15, 0, 0, 1553, 1554, 7, 6, 0, 0, 1554, 1555, 7, 7, 0, 0, 1555, 1556, - 7, 18, 0, 0, 1556, 368, 1, 0, 0, 0, 1557, 1563, 5, 34, 0, 0, 1558, 1562, - 8, 26, 0, 0, 1559, 1560, 5, 34, 0, 0, 1560, 1562, 5, 34, 0, 0, 1561, 1558, - 1, 0, 0, 0, 1561, 1559, 1, 0, 0, 0, 1562, 1565, 1, 0, 0, 0, 1563, 1561, - 1, 0, 0, 0, 1563, 1564, 1, 0, 0, 0, 1564, 1566, 1, 0, 0, 0, 1565, 1563, - 1, 0, 0, 0, 1566, 1593, 5, 34, 0, 0, 1567, 1573, 5, 96, 0, 0, 1568, 1572, - 8, 27, 0, 0, 1569, 1570, 5, 96, 0, 0, 1570, 1572, 5, 96, 0, 0, 1571, 1568, - 1, 0, 0, 0, 1571, 1569, 1, 0, 0, 0, 1572, 1575, 1, 0, 0, 0, 1573, 1571, - 1, 0, 0, 0, 1573, 1574, 1, 0, 0, 0, 1574, 1576, 1, 0, 0, 0, 1575, 1573, - 1, 0, 0, 0, 1576, 1593, 5, 96, 0, 0, 1577, 1581, 5, 91, 0, 0, 1578, 1580, - 8, 28, 0, 0, 1579, 1578, 1, 0, 0, 0, 1580, 1583, 1, 0, 0, 0, 1581, 1579, - 1, 0, 0, 0, 1581, 1582, 1, 0, 0, 0, 1582, 1584, 1, 0, 0, 0, 1583, 1581, - 1, 0, 0, 0, 1584, 1593, 5, 93, 0, 0, 1585, 1589, 7, 29, 0, 0, 1586, 1588, - 7, 30, 0, 0, 1587, 1586, 1, 0, 0, 0, 1588, 1591, 1, 0, 0, 0, 1589, 1587, - 1, 0, 0, 0, 1589, 1590, 1, 0, 0, 0, 1590, 1593, 1, 0, 0, 0, 1591, 1589, - 1, 0, 0, 0, 1592, 1557, 1, 0, 0, 0, 1592, 1567, 1, 0, 0, 0, 1592, 1577, - 1, 0, 0, 0, 1592, 1585, 1, 0, 0, 0, 1593, 370, 1, 0, 0, 0, 1594, 1596, - 3, 389, 194, 0, 1595, 1594, 1, 0, 0, 0, 1596, 1597, 1, 0, 0, 0, 1597, 1595, - 1, 0, 0, 0, 1597, 1598, 1, 0, 0, 0, 1598, 1606, 1, 0, 0, 0, 1599, 1603, - 5, 46, 0, 0, 1600, 1602, 3, 389, 194, 0, 1601, 1600, 1, 0, 0, 0, 1602, - 1605, 1, 0, 0, 0, 1603, 1601, 1, 0, 0, 0, 1603, 1604, 1, 0, 0, 0, 1604, - 1607, 1, 0, 0, 0, 1605, 1603, 1, 0, 0, 0, 1606, 1599, 1, 0, 0, 0, 1606, - 1607, 1, 0, 0, 0, 1607, 1615, 1, 0, 0, 0, 1608, 1610, 5, 46, 0, 0, 1609, - 1611, 3, 389, 194, 0, 1610, 1609, 1, 0, 0, 0, 1611, 1612, 1, 0, 0, 0, 1612, - 1610, 1, 0, 0, 0, 1612, 1613, 1, 0, 0, 0, 1613, 1615, 1, 0, 0, 0, 1614, - 1595, 1, 0, 0, 0, 1614, 1608, 1, 0, 0, 0, 1615, 1625, 1, 0, 0, 0, 1616, - 1618, 7, 10, 0, 0, 1617, 1619, 7, 31, 0, 0, 1618, 1617, 1, 0, 0, 0, 1618, - 1619, 1, 0, 0, 0, 1619, 1621, 1, 0, 0, 0, 1620, 1622, 3, 389, 194, 0, 1621, - 1620, 1, 0, 0, 0, 1622, 1623, 1, 0, 0, 0, 1623, 1621, 1, 0, 0, 0, 1623, - 1624, 1, 0, 0, 0, 1624, 1626, 1, 0, 0, 0, 1625, 1616, 1, 0, 0, 0, 1625, - 1626, 1, 0, 0, 0, 1626, 1636, 1, 0, 0, 0, 1627, 1628, 5, 48, 0, 0, 1628, - 1629, 7, 22, 0, 0, 1629, 1631, 1, 0, 0, 0, 1630, 1632, 3, 387, 193, 0, - 1631, 1630, 1, 0, 0, 0, 1632, 1633, 1, 0, 0, 0, 1633, 1631, 1, 0, 0, 0, - 1633, 1634, 1, 0, 0, 0, 
1634, 1636, 1, 0, 0, 0, 1635, 1614, 1, 0, 0, 0, - 1635, 1627, 1, 0, 0, 0, 1636, 372, 1, 0, 0, 0, 1637, 1641, 5, 63, 0, 0, - 1638, 1640, 3, 389, 194, 0, 1639, 1638, 1, 0, 0, 0, 1640, 1643, 1, 0, 0, - 0, 1641, 1639, 1, 0, 0, 0, 1641, 1642, 1, 0, 0, 0, 1642, 1647, 1, 0, 0, - 0, 1643, 1641, 1, 0, 0, 0, 1644, 1645, 7, 32, 0, 0, 1645, 1647, 3, 369, - 184, 0, 1646, 1637, 1, 0, 0, 0, 1646, 1644, 1, 0, 0, 0, 1647, 374, 1, 0, - 0, 0, 1648, 1654, 5, 39, 0, 0, 1649, 1653, 8, 33, 0, 0, 1650, 1651, 5, - 39, 0, 0, 1651, 1653, 5, 39, 0, 0, 1652, 1649, 1, 0, 0, 0, 1652, 1650, - 1, 0, 0, 0, 1653, 1656, 1, 0, 0, 0, 1654, 1652, 1, 0, 0, 0, 1654, 1655, - 1, 0, 0, 0, 1655, 1657, 1, 0, 0, 0, 1656, 1654, 1, 0, 0, 0, 1657, 1658, - 5, 39, 0, 0, 1658, 376, 1, 0, 0, 0, 1659, 1660, 7, 22, 0, 0, 1660, 1661, - 3, 375, 187, 0, 1661, 378, 1, 0, 0, 0, 1662, 1663, 5, 45, 0, 0, 1663, 1664, - 5, 45, 0, 0, 1664, 1668, 1, 0, 0, 0, 1665, 1667, 8, 34, 0, 0, 1666, 1665, - 1, 0, 0, 0, 1667, 1670, 1, 0, 0, 0, 1668, 1666, 1, 0, 0, 0, 1668, 1669, - 1, 0, 0, 0, 1669, 1676, 1, 0, 0, 0, 1670, 1668, 1, 0, 0, 0, 1671, 1673, - 5, 13, 0, 0, 1672, 1671, 1, 0, 0, 0, 1672, 1673, 1, 0, 0, 0, 1673, 1674, - 1, 0, 0, 0, 1674, 1677, 5, 10, 0, 0, 1675, 1677, 5, 0, 0, 1, 1676, 1672, - 1, 0, 0, 0, 1676, 1675, 1, 0, 0, 0, 1677, 1678, 1, 0, 0, 0, 1678, 1679, - 6, 189, 0, 0, 1679, 380, 1, 0, 0, 0, 1680, 1681, 5, 47, 0, 0, 1681, 1682, - 5, 42, 0, 0, 1682, 1686, 1, 0, 0, 0, 1683, 1685, 9, 0, 0, 0, 1684, 1683, - 1, 0, 0, 0, 1685, 1688, 1, 0, 0, 0, 1686, 1687, 1, 0, 0, 0, 1686, 1684, - 1, 0, 0, 0, 1687, 1689, 1, 0, 0, 0, 1688, 1686, 1, 0, 0, 0, 1689, 1690, - 5, 42, 0, 0, 1690, 1691, 5, 47, 0, 0, 1691, 1692, 1, 0, 0, 0, 1692, 1693, - 6, 190, 0, 0, 1693, 382, 1, 0, 0, 0, 1694, 1695, 7, 35, 0, 0, 1695, 1696, - 1, 0, 0, 0, 1696, 1697, 6, 191, 0, 0, 1697, 384, 1, 0, 0, 0, 1698, 1699, - 9, 0, 0, 0, 1699, 386, 1, 0, 0, 0, 1700, 1701, 7, 36, 0, 0, 1701, 388, - 1, 0, 0, 0, 1702, 1703, 7, 37, 0, 0, 1703, 390, 1, 0, 0, 0, 26, 0, 1561, - 1563, 1571, 1573, 1581, 1589, 1592, 1597, 1603, 1606, 1612, 1614, 1618, - 1623, 1625, 1633, 1635, 1641, 1646, 1652, 1654, 1668, 1672, 1676, 1686, - 1, 0, 1, 0, - } - deserializer := antlr.NewATNDeserializer(nil) - staticData.atn = deserializer.Deserialize(staticData.serializedATN) - atn := staticData.atn - staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) - decisionToDFA := staticData.decisionToDFA - for index, state := range atn.DecisionToState { - decisionToDFA[index] = antlr.NewDFA(state, index) - } -} - -// SQLiteLexerInit initializes any static state used to implement SQLiteLexer. By default the -// static state used to implement the lexer is lazily initialized during the first call to -// NewSQLiteLexer(). You can call this function if you wish to initialize the static state ahead -// of time. -func SQLiteLexerInit() { - staticData := &sqlitelexerLexerStaticData - staticData.once.Do(sqlitelexerLexerInit) -} - -// NewSQLiteLexer produces a new lexer instance for the optional input antlr.CharStream. 
-func NewSQLiteLexer(input antlr.CharStream) *SQLiteLexer { - SQLiteLexerInit() - l := new(SQLiteLexer) - l.BaseLexer = antlr.NewBaseLexer(input) - staticData := &sqlitelexerLexerStaticData - l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) - l.channelNames = staticData.channelNames - l.modeNames = staticData.modeNames - l.RuleNames = staticData.ruleNames - l.LiteralNames = staticData.literalNames - l.SymbolicNames = staticData.symbolicNames - l.GrammarFileName = "SQLiteLexer.g4" - // TODO: l.EOF = antlr.TokenEOF - - return l -} - -// SQLiteLexer tokens. -const ( - SQLiteLexerSCOL = 1 - SQLiteLexerDOT = 2 - SQLiteLexerOPEN_PAR = 3 - SQLiteLexerCLOSE_PAR = 4 - SQLiteLexerCOMMA = 5 - SQLiteLexerASSIGN = 6 - SQLiteLexerSTAR = 7 - SQLiteLexerPLUS = 8 - SQLiteLexerMINUS = 9 - SQLiteLexerTILDE = 10 - SQLiteLexerPIPE2 = 11 - SQLiteLexerDIV = 12 - SQLiteLexerMOD = 13 - SQLiteLexerLT2 = 14 - SQLiteLexerGT2 = 15 - SQLiteLexerAMP = 16 - SQLiteLexerPIPE = 17 - SQLiteLexerLT = 18 - SQLiteLexerLT_EQ = 19 - SQLiteLexerGT = 20 - SQLiteLexerGT_EQ = 21 - SQLiteLexerEQ = 22 - SQLiteLexerNOT_EQ1 = 23 - SQLiteLexerNOT_EQ2 = 24 - SQLiteLexerABORT_ = 25 - SQLiteLexerACTION_ = 26 - SQLiteLexerADD_ = 27 - SQLiteLexerAFTER_ = 28 - SQLiteLexerALL_ = 29 - SQLiteLexerALTER_ = 30 - SQLiteLexerANALYZE_ = 31 - SQLiteLexerAND_ = 32 - SQLiteLexerAS_ = 33 - SQLiteLexerASC_ = 34 - SQLiteLexerATTACH_ = 35 - SQLiteLexerAUTOINCREMENT_ = 36 - SQLiteLexerBEFORE_ = 37 - SQLiteLexerBEGIN_ = 38 - SQLiteLexerBETWEEN_ = 39 - SQLiteLexerBY_ = 40 - SQLiteLexerCASCADE_ = 41 - SQLiteLexerCASE_ = 42 - SQLiteLexerCAST_ = 43 - SQLiteLexerCHECK_ = 44 - SQLiteLexerCOLLATE_ = 45 - SQLiteLexerCOLUMN_ = 46 - SQLiteLexerCOMMIT_ = 47 - SQLiteLexerCONFLICT_ = 48 - SQLiteLexerCONSTRAINT_ = 49 - SQLiteLexerCREATE_ = 50 - SQLiteLexerCROSS_ = 51 - SQLiteLexerCURRENT_DATE_ = 52 - SQLiteLexerCURRENT_TIME_ = 53 - SQLiteLexerCURRENT_TIMESTAMP_ = 54 - SQLiteLexerDATABASE_ = 55 - SQLiteLexerDEFAULT_ = 56 - SQLiteLexerDEFERRABLE_ = 57 - SQLiteLexerDEFERRED_ = 58 - SQLiteLexerDELETE_ = 59 - SQLiteLexerDESC_ = 60 - SQLiteLexerDETACH_ = 61 - SQLiteLexerDISTINCT_ = 62 - SQLiteLexerDROP_ = 63 - SQLiteLexerEACH_ = 64 - SQLiteLexerELSE_ = 65 - SQLiteLexerEND_ = 66 - SQLiteLexerESCAPE_ = 67 - SQLiteLexerEXCEPT_ = 68 - SQLiteLexerEXCLUSIVE_ = 69 - SQLiteLexerEXISTS_ = 70 - SQLiteLexerEXPLAIN_ = 71 - SQLiteLexerFAIL_ = 72 - SQLiteLexerFOR_ = 73 - SQLiteLexerFOREIGN_ = 74 - SQLiteLexerFROM_ = 75 - SQLiteLexerFULL_ = 76 - SQLiteLexerGLOB_ = 77 - SQLiteLexerGROUP_ = 78 - SQLiteLexerHAVING_ = 79 - SQLiteLexerIF_ = 80 - SQLiteLexerIGNORE_ = 81 - SQLiteLexerIMMEDIATE_ = 82 - SQLiteLexerIN_ = 83 - SQLiteLexerINDEX_ = 84 - SQLiteLexerINDEXED_ = 85 - SQLiteLexerINITIALLY_ = 86 - SQLiteLexerINNER_ = 87 - SQLiteLexerINSERT_ = 88 - SQLiteLexerINSTEAD_ = 89 - SQLiteLexerINTERSECT_ = 90 - SQLiteLexerINTO_ = 91 - SQLiteLexerIS_ = 92 - SQLiteLexerISNULL_ = 93 - SQLiteLexerJOIN_ = 94 - SQLiteLexerKEY_ = 95 - SQLiteLexerLEFT_ = 96 - SQLiteLexerLIKE_ = 97 - SQLiteLexerLIMIT_ = 98 - SQLiteLexerMATCH_ = 99 - SQLiteLexerNATURAL_ = 100 - SQLiteLexerNO_ = 101 - SQLiteLexerNOT_ = 102 - SQLiteLexerNOTNULL_ = 103 - SQLiteLexerNULL_ = 104 - SQLiteLexerOF_ = 105 - SQLiteLexerOFFSET_ = 106 - SQLiteLexerON_ = 107 - SQLiteLexerOR_ = 108 - SQLiteLexerORDER_ = 109 - SQLiteLexerOUTER_ = 110 - SQLiteLexerPLAN_ = 111 - SQLiteLexerPRAGMA_ = 112 - SQLiteLexerPRIMARY_ = 113 - SQLiteLexerQUERY_ = 114 - SQLiteLexerRAISE_ = 115 - 
SQLiteLexerRECURSIVE_ = 116 - SQLiteLexerREFERENCES_ = 117 - SQLiteLexerREGEXP_ = 118 - SQLiteLexerREINDEX_ = 119 - SQLiteLexerRELEASE_ = 120 - SQLiteLexerRENAME_ = 121 - SQLiteLexerREPLACE_ = 122 - SQLiteLexerRESTRICT_ = 123 - SQLiteLexerRETURNING_ = 124 - SQLiteLexerRIGHT_ = 125 - SQLiteLexerROLLBACK_ = 126 - SQLiteLexerROW_ = 127 - SQLiteLexerROWS_ = 128 - SQLiteLexerSAVEPOINT_ = 129 - SQLiteLexerSELECT_ = 130 - SQLiteLexerSET_ = 131 - SQLiteLexerTABLE_ = 132 - SQLiteLexerTEMP_ = 133 - SQLiteLexerTEMPORARY_ = 134 - SQLiteLexerTHEN_ = 135 - SQLiteLexerTO_ = 136 - SQLiteLexerTRANSACTION_ = 137 - SQLiteLexerTRIGGER_ = 138 - SQLiteLexerUNION_ = 139 - SQLiteLexerUNIQUE_ = 140 - SQLiteLexerUPDATE_ = 141 - SQLiteLexerUSING_ = 142 - SQLiteLexerVACUUM_ = 143 - SQLiteLexerVALUES_ = 144 - SQLiteLexerVIEW_ = 145 - SQLiteLexerVIRTUAL_ = 146 - SQLiteLexerWHEN_ = 147 - SQLiteLexerWHERE_ = 148 - SQLiteLexerWITH_ = 149 - SQLiteLexerWITHOUT_ = 150 - SQLiteLexerFIRST_VALUE_ = 151 - SQLiteLexerOVER_ = 152 - SQLiteLexerPARTITION_ = 153 - SQLiteLexerRANGE_ = 154 - SQLiteLexerPRECEDING_ = 155 - SQLiteLexerUNBOUNDED_ = 156 - SQLiteLexerCURRENT_ = 157 - SQLiteLexerFOLLOWING_ = 158 - SQLiteLexerCUME_DIST_ = 159 - SQLiteLexerDENSE_RANK_ = 160 - SQLiteLexerLAG_ = 161 - SQLiteLexerLAST_VALUE_ = 162 - SQLiteLexerLEAD_ = 163 - SQLiteLexerNTH_VALUE_ = 164 - SQLiteLexerNTILE_ = 165 - SQLiteLexerPERCENT_RANK_ = 166 - SQLiteLexerRANK_ = 167 - SQLiteLexerROW_NUMBER_ = 168 - SQLiteLexerGENERATED_ = 169 - SQLiteLexerALWAYS_ = 170 - SQLiteLexerSTORED_ = 171 - SQLiteLexerTRUE_ = 172 - SQLiteLexerFALSE_ = 173 - SQLiteLexerWINDOW_ = 174 - SQLiteLexerNULLS_ = 175 - SQLiteLexerFIRST_ = 176 - SQLiteLexerLAST_ = 177 - SQLiteLexerFILTER_ = 178 - SQLiteLexerGROUPS_ = 179 - SQLiteLexerEXCLUDE_ = 180 - SQLiteLexerTIES_ = 181 - SQLiteLexerOTHERS_ = 182 - SQLiteLexerDO_ = 183 - SQLiteLexerNOTHING_ = 184 - SQLiteLexerIDENTIFIER = 185 - SQLiteLexerNUMERIC_LITERAL = 186 - SQLiteLexerBIND_PARAMETER = 187 - SQLiteLexerSTRING_LITERAL = 188 - SQLiteLexerBLOB_LITERAL = 189 - SQLiteLexerSINGLE_LINE_COMMENT = 190 - SQLiteLexerMULTILINE_COMMENT = 191 - SQLiteLexerSPACES = 192 - SQLiteLexerUNEXPECTED_CHAR = 193 -) +// Code generated from SQLiteLexer.g4 by ANTLR 4.12.0. DO NOT EDIT. 
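For reference, the regenerated file below exposes the same entry points shown above: its static state (the serialized ATN, the name tables, and the per-decision DFAs) is built once inside a sync.Once, either lazily on the first NewSQLiteLexer call or eagerly via SQLiteLexerInit. A minimal sketch of driving this lexer with the antlr v4 Go runtime it imports — the example.com import path and the sample SQL string are placeholders, not part of this repository:

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"

	// Hypothetical import path; point it at wherever the generated
	// sqliteparser package lives in your module.
	"example.com/yourmodule/sqliteparser"
)

func main() {
	// Optional: run the sync.Once-guarded static initialization (ATN
	// deserialization, DFA construction) up front instead of on first use.
	sqliteparser.SQLiteLexerInit()

	lexer := sqliteparser.NewSQLiteLexer(antlr.NewInputStream("SELECT id FROM notes;"))

	// Drain tokens until EOF. GetTokenType returns the generated token
	// constants (SQLiteLexerSELECT_, SQLiteLexerIDENTIFIER, ...) listed above.
	for tok := lexer.NextToken(); tok.GetTokenType() != antlr.TokenEOF; tok = lexer.NextToken() {
		fmt.Printf("%3d %q\n", tok.GetTokenType(), tok.GetText())
	}
}

Calling SQLiteLexerInit ahead of time only matters when the first tokenization sits on a latency-sensitive path; otherwise the lazy default is equivalent.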
+ +package sqliteparser + +import ( + "fmt" + "sync" + "unicode" + + "github.com/antlr/antlr4/runtime/Go/antlr/v4" +) + +// Suppress unused import error +var _ = fmt.Printf +var _ = sync.Once{} +var _ = unicode.IsLetter + +type SQLiteLexer struct { + *antlr.BaseLexer + channelNames []string + modeNames []string + // TODO: EOF string +} + +var sqlitelexerLexerStaticData struct { + once sync.Once + serializedATN []int32 + channelNames []string + modeNames []string + literalNames []string + symbolicNames []string + ruleNames []string + predictionContextCache *antlr.PredictionContextCache + atn *antlr.ATN + decisionToDFA []*antlr.DFA +} + +func sqlitelexerLexerInit() { + staticData := &sqlitelexerLexerStaticData + staticData.channelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", + } + staticData.modeNames = []string{ + "DEFAULT_MODE", + } + staticData.literalNames = []string{ + "", "';'", "'.'", "'('", "')'", "','", "'='", "'*'", "'+'", "'-'", "'~'", + "'||'", "'/'", "'%'", "'<<'", "'>>'", "'&'", "'|'", "'<'", "'<='", "'>'", + "'>='", "'=='", "'!='", "'<>'", "'ABORT'", "'ACTION'", "'ADD'", "'AFTER'", + "'ALL'", "'ALTER'", "'ANALYZE'", "'AND'", "'AS'", "'ASC'", "'ATTACH'", + "'AUTOINCREMENT'", "'BEFORE'", "'BEGIN'", "'BETWEEN'", "'BY'", "'CASCADE'", + "'CASE'", "'CAST'", "'CHECK'", "'COLLATE'", "'COLUMN'", "'COMMIT'", + "'CONFLICT'", "'CONSTRAINT'", "'CREATE'", "'CROSS'", "'CURRENT_DATE'", + "'CURRENT_TIME'", "'CURRENT_TIMESTAMP'", "'DATABASE'", "'DEFAULT'", + "'DEFERRABLE'", "'DEFERRED'", "'DELETE'", "'DESC'", "'DETACH'", "'DISTINCT'", + "'DROP'", "'EACH'", "'ELSE'", "'END'", "'ESCAPE'", "'EXCEPT'", "'EXCLUSIVE'", + "'EXISTS'", "'EXPLAIN'", "'FAIL'", "'FOR'", "'FOREIGN'", "'FROM'", "'FULL'", + "'GLOB'", "'GROUP'", "'HAVING'", "'IF'", "'IGNORE'", "'IMMEDIATE'", + "'IN'", "'INDEX'", "'INDEXED'", "'INITIALLY'", "'INNER'", "'INSERT'", + "'INSTEAD'", "'INTERSECT'", "'INTO'", "'IS'", "'ISNULL'", "'JOIN'", + "'KEY'", "'LEFT'", "'LIKE'", "'LIMIT'", "'MATCH'", "'NATURAL'", "'NO'", + "'NOT'", "'NOTNULL'", "'NULL'", "'OF'", "'OFFSET'", "'ON'", "'OR'", + "'ORDER'", "'OUTER'", "'PLAN'", "'PRAGMA'", "'PRIMARY'", "'QUERY'", + "'RAISE'", "'RECURSIVE'", "'REFERENCES'", "'REGEXP'", "'REINDEX'", "'RELEASE'", + "'RENAME'", "'REPLACE'", "'RESTRICT'", "'RETURNING'", "'RIGHT'", "'ROLLBACK'", + "'ROW'", "'ROWS'", "'SAVEPOINT'", "'SELECT'", "'SET'", "'TABLE'", "'TEMP'", + "'TEMPORARY'", "'THEN'", "'TO'", "'TRANSACTION'", "'TRIGGER'", "'UNION'", + "'UNIQUE'", "'UPDATE'", "'USING'", "'VACUUM'", "'VALUES'", "'VIEW'", + "'VIRTUAL'", "'WHEN'", "'WHERE'", "'WITH'", "'WITHOUT'", "'FIRST_VALUE'", + "'OVER'", "'PARTITION'", "'RANGE'", "'PRECEDING'", "'UNBOUNDED'", "'CURRENT'", + "'FOLLOWING'", "'CUME_DIST'", "'DENSE_RANK'", "'LAG'", "'LAST_VALUE'", + "'LEAD'", "'NTH_VALUE'", "'NTILE'", "'PERCENT_RANK'", "'RANK'", "'ROW_NUMBER'", + "'GENERATED'", "'ALWAYS'", "'STORED'", "'TRUE'", "'FALSE'", "'WINDOW'", + "'NULLS'", "'FIRST'", "'LAST'", "'FILTER'", "'GROUPS'", "'EXCLUDE'", + "'TIES'", "'OTHERS'", "'DO'", "'NOTHING'", + } + staticData.symbolicNames = []string{ + "", "SCOL", "DOT", "OPEN_PAR", "CLOSE_PAR", "COMMA", "ASSIGN", "STAR", + "PLUS", "MINUS", "TILDE", "PIPE2", "DIV", "MOD", "LT2", "GT2", "AMP", + "PIPE", "LT", "LT_EQ", "GT", "GT_EQ", "EQ", "NOT_EQ1", "NOT_EQ2", "ABORT_", + "ACTION_", "ADD_", "AFTER_", "ALL_", "ALTER_", "ANALYZE_", "AND_", "AS_", + "ASC_", "ATTACH_", "AUTOINCREMENT_", "BEFORE_", "BEGIN_", "BETWEEN_", + "BY_", "CASCADE_", "CASE_", "CAST_", "CHECK_", "COLLATE_", "COLUMN_", + "COMMIT_", "CONFLICT_", 
"CONSTRAINT_", "CREATE_", "CROSS_", "CURRENT_DATE_", + "CURRENT_TIME_", "CURRENT_TIMESTAMP_", "DATABASE_", "DEFAULT_", "DEFERRABLE_", + "DEFERRED_", "DELETE_", "DESC_", "DETACH_", "DISTINCT_", "DROP_", "EACH_", + "ELSE_", "END_", "ESCAPE_", "EXCEPT_", "EXCLUSIVE_", "EXISTS_", "EXPLAIN_", + "FAIL_", "FOR_", "FOREIGN_", "FROM_", "FULL_", "GLOB_", "GROUP_", "HAVING_", + "IF_", "IGNORE_", "IMMEDIATE_", "IN_", "INDEX_", "INDEXED_", "INITIALLY_", + "INNER_", "INSERT_", "INSTEAD_", "INTERSECT_", "INTO_", "IS_", "ISNULL_", + "JOIN_", "KEY_", "LEFT_", "LIKE_", "LIMIT_", "MATCH_", "NATURAL_", "NO_", + "NOT_", "NOTNULL_", "NULL_", "OF_", "OFFSET_", "ON_", "OR_", "ORDER_", + "OUTER_", "PLAN_", "PRAGMA_", "PRIMARY_", "QUERY_", "RAISE_", "RECURSIVE_", + "REFERENCES_", "REGEXP_", "REINDEX_", "RELEASE_", "RENAME_", "REPLACE_", + "RESTRICT_", "RETURNING_", "RIGHT_", "ROLLBACK_", "ROW_", "ROWS_", "SAVEPOINT_", + "SELECT_", "SET_", "TABLE_", "TEMP_", "TEMPORARY_", "THEN_", "TO_", + "TRANSACTION_", "TRIGGER_", "UNION_", "UNIQUE_", "UPDATE_", "USING_", + "VACUUM_", "VALUES_", "VIEW_", "VIRTUAL_", "WHEN_", "WHERE_", "WITH_", + "WITHOUT_", "FIRST_VALUE_", "OVER_", "PARTITION_", "RANGE_", "PRECEDING_", + "UNBOUNDED_", "CURRENT_", "FOLLOWING_", "CUME_DIST_", "DENSE_RANK_", + "LAG_", "LAST_VALUE_", "LEAD_", "NTH_VALUE_", "NTILE_", "PERCENT_RANK_", + "RANK_", "ROW_NUMBER_", "GENERATED_", "ALWAYS_", "STORED_", "TRUE_", + "FALSE_", "WINDOW_", "NULLS_", "FIRST_", "LAST_", "FILTER_", "GROUPS_", + "EXCLUDE_", "TIES_", "OTHERS_", "DO_", "NOTHING_", "IDENTIFIER", "NUMERIC_LITERAL", + "BIND_PARAMETER", "STRING_LITERAL", "BLOB_LITERAL", "SINGLE_LINE_COMMENT", + "MULTILINE_COMMENT", "SPACES", "UNEXPECTED_CHAR", + } + staticData.ruleNames = []string{ + "SCOL", "DOT", "OPEN_PAR", "CLOSE_PAR", "COMMA", "ASSIGN", "STAR", "PLUS", + "MINUS", "TILDE", "PIPE2", "DIV", "MOD", "LT2", "GT2", "AMP", "PIPE", + "LT", "LT_EQ", "GT", "GT_EQ", "EQ", "NOT_EQ1", "NOT_EQ2", "ABORT_", + "ACTION_", "ADD_", "AFTER_", "ALL_", "ALTER_", "ANALYZE_", "AND_", "AS_", + "ASC_", "ATTACH_", "AUTOINCREMENT_", "BEFORE_", "BEGIN_", "BETWEEN_", + "BY_", "CASCADE_", "CASE_", "CAST_", "CHECK_", "COLLATE_", "COLUMN_", + "COMMIT_", "CONFLICT_", "CONSTRAINT_", "CREATE_", "CROSS_", "CURRENT_DATE_", + "CURRENT_TIME_", "CURRENT_TIMESTAMP_", "DATABASE_", "DEFAULT_", "DEFERRABLE_", + "DEFERRED_", "DELETE_", "DESC_", "DETACH_", "DISTINCT_", "DROP_", "EACH_", + "ELSE_", "END_", "ESCAPE_", "EXCEPT_", "EXCLUSIVE_", "EXISTS_", "EXPLAIN_", + "FAIL_", "FOR_", "FOREIGN_", "FROM_", "FULL_", "GLOB_", "GROUP_", "HAVING_", + "IF_", "IGNORE_", "IMMEDIATE_", "IN_", "INDEX_", "INDEXED_", "INITIALLY_", + "INNER_", "INSERT_", "INSTEAD_", "INTERSECT_", "INTO_", "IS_", "ISNULL_", + "JOIN_", "KEY_", "LEFT_", "LIKE_", "LIMIT_", "MATCH_", "NATURAL_", "NO_", + "NOT_", "NOTNULL_", "NULL_", "OF_", "OFFSET_", "ON_", "OR_", "ORDER_", + "OUTER_", "PLAN_", "PRAGMA_", "PRIMARY_", "QUERY_", "RAISE_", "RECURSIVE_", + "REFERENCES_", "REGEXP_", "REINDEX_", "RELEASE_", "RENAME_", "REPLACE_", + "RESTRICT_", "RETURNING_", "RIGHT_", "ROLLBACK_", "ROW_", "ROWS_", "SAVEPOINT_", + "SELECT_", "SET_", "TABLE_", "TEMP_", "TEMPORARY_", "THEN_", "TO_", + "TRANSACTION_", "TRIGGER_", "UNION_", "UNIQUE_", "UPDATE_", "USING_", + "VACUUM_", "VALUES_", "VIEW_", "VIRTUAL_", "WHEN_", "WHERE_", "WITH_", + "WITHOUT_", "FIRST_VALUE_", "OVER_", "PARTITION_", "RANGE_", "PRECEDING_", + "UNBOUNDED_", "CURRENT_", "FOLLOWING_", "CUME_DIST_", "DENSE_RANK_", + "LAG_", "LAST_VALUE_", "LEAD_", "NTH_VALUE_", "NTILE_", "PERCENT_RANK_", + 
"RANK_", "ROW_NUMBER_", "GENERATED_", "ALWAYS_", "STORED_", "TRUE_", + "FALSE_", "WINDOW_", "NULLS_", "FIRST_", "LAST_", "FILTER_", "GROUPS_", + "EXCLUDE_", "TIES_", "OTHERS_", "DO_", "NOTHING_", "IDENTIFIER", "NUMERIC_LITERAL", + "BIND_PARAMETER", "STRING_LITERAL", "BLOB_LITERAL", "SINGLE_LINE_COMMENT", + "MULTILINE_COMMENT", "SPACES", "UNEXPECTED_CHAR", "HEX_DIGIT", "DIGIT", + } + staticData.predictionContextCache = antlr.NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 0, 193, 1704, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, + 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, + 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, + 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, + 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, + 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, + 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, + 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, + 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, + 46, 2, 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, + 2, 52, 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, + 57, 7, 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, + 7, 62, 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, + 67, 2, 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, + 2, 73, 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, + 78, 7, 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, + 7, 83, 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, + 88, 2, 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, + 2, 94, 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, + 99, 7, 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, + 2, 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, + 7, 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, + 2, 113, 7, 113, 2, 114, 7, 114, 2, 115, 7, 115, 2, 116, 7, 116, 2, 117, + 7, 117, 2, 118, 7, 118, 2, 119, 7, 119, 2, 120, 7, 120, 2, 121, 7, 121, + 2, 122, 7, 122, 2, 123, 7, 123, 2, 124, 7, 124, 2, 125, 7, 125, 2, 126, + 7, 126, 2, 127, 7, 127, 2, 128, 7, 128, 2, 129, 7, 129, 2, 130, 7, 130, + 2, 131, 7, 131, 2, 132, 7, 132, 2, 133, 7, 133, 2, 134, 7, 134, 2, 135, + 7, 135, 2, 136, 7, 136, 2, 137, 7, 137, 2, 138, 7, 138, 2, 139, 7, 139, + 2, 140, 7, 140, 2, 141, 7, 141, 2, 142, 7, 142, 2, 143, 7, 143, 2, 144, + 7, 144, 2, 145, 7, 145, 2, 146, 7, 146, 2, 147, 7, 147, 2, 148, 7, 148, + 2, 149, 7, 149, 2, 150, 7, 150, 2, 151, 7, 151, 2, 152, 7, 152, 2, 153, + 7, 153, 2, 154, 7, 154, 2, 155, 7, 155, 2, 156, 7, 156, 2, 157, 7, 157, + 2, 158, 7, 158, 2, 159, 7, 159, 2, 160, 7, 160, 2, 161, 7, 161, 2, 162, + 7, 162, 2, 163, 7, 163, 2, 164, 7, 164, 2, 165, 7, 165, 2, 166, 7, 166, + 2, 167, 7, 167, 2, 168, 7, 168, 2, 169, 7, 169, 2, 170, 7, 170, 2, 171, + 7, 171, 2, 172, 7, 172, 2, 173, 7, 173, 2, 174, 7, 174, 2, 175, 7, 175, + 2, 176, 7, 176, 2, 177, 7, 177, 2, 178, 7, 178, 2, 179, 7, 179, 2, 180, + 7, 180, 2, 181, 7, 181, 2, 182, 7, 182, 2, 183, 7, 183, 2, 184, 7, 184, + 2, 185, 7, 185, 2, 186, 7, 186, 2, 187, 7, 187, 2, 188, 7, 188, 2, 189, + 7, 189, 2, 190, 7, 190, 2, 191, 7, 191, 2, 192, 7, 192, 2, 193, 7, 193, + 2, 194, 7, 194, 1, 0, 1, 0, 1, 1, 1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 
1, + 4, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, + 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, + 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 18, 1, + 19, 1, 19, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, + 1, 23, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, + 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, + 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, + 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, + 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 33, 1, + 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 34, 1, 35, + 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, 35, 1, + 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, + 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, + 38, 1, 38, 1, 38, 1, 39, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 1, 40, + 1, 40, 1, 40, 1, 40, 1, 41, 1, 41, 1, 41, 1, 41, 1, 41, 1, 42, 1, 42, 1, + 42, 1, 42, 1, 42, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 44, 1, 44, + 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 45, 1, 45, 1, 45, 1, 45, 1, + 45, 1, 45, 1, 45, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 46, 1, 47, + 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 47, 1, 48, 1, 48, 1, + 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 48, 1, 49, 1, 49, + 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, + 50, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, 1, 51, + 1, 51, 1, 51, 1, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, + 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, + 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, 53, 1, + 53, 1, 53, 1, 53, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, 1, 54, + 1, 54, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 55, 1, 56, 1, + 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 56, 1, 57, + 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 58, 1, 58, 1, + 58, 1, 58, 1, 58, 1, 58, 1, 58, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, + 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 1, + 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, 63, + 1, 63, 1, 63, 1, 63, 1, 63, 1, 64, 1, 64, 1, 64, 1, 64, 1, 64, 1, 65, 1, + 65, 1, 65, 1, 65, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 66, 1, 67, + 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 67, 1, 68, 1, 68, 1, 68, 1, 68, 1, + 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 68, 1, 69, 1, 69, 1, 69, 1, 69, 1, 69, + 1, 69, 1, 69, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, 70, 1, + 71, 1, 71, 1, 71, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 72, 1, 73, 1, 73, + 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 1, 74, 1, 74, 1, 74, 1, 74, 1, + 74, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 76, 1, 76, 1, 76, 1, 76, 1, 76, + 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 77, 1, 78, 1, 78, 1, 78, 1, 78, 1, + 78, 1, 78, 1, 78, 1, 79, 1, 79, 1, 79, 1, 80, 1, 80, 1, 80, 1, 80, 1, 80, + 1, 80, 1, 80, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, 81, 1, + 81, 1, 81, 1, 82, 1, 82, 1, 82, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, 1, 83, + 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 84, 1, 85, 1, 85, 1, + 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 85, 1, 86, 1, 86, 1, 86, + 1, 86, 1, 86, 1, 86, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, 87, 1, + 
88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 88, 1, 89, 1, 89, 1, 89, + 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 89, 1, 90, 1, 90, 1, 90, 1, + 90, 1, 90, 1, 91, 1, 91, 1, 91, 1, 92, 1, 92, 1, 92, 1, 92, 1, 92, 1, 92, + 1, 92, 1, 93, 1, 93, 1, 93, 1, 93, 1, 93, 1, 94, 1, 94, 1, 94, 1, 94, 1, + 95, 1, 95, 1, 95, 1, 95, 1, 95, 1, 96, 1, 96, 1, 96, 1, 96, 1, 96, 1, 97, + 1, 97, 1, 97, 1, 97, 1, 97, 1, 97, 1, 98, 1, 98, 1, 98, 1, 98, 1, 98, 1, + 98, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 99, 1, 100, 1, + 100, 1, 100, 1, 101, 1, 101, 1, 101, 1, 101, 1, 102, 1, 102, 1, 102, 1, + 102, 1, 102, 1, 102, 1, 102, 1, 102, 1, 103, 1, 103, 1, 103, 1, 103, 1, + 103, 1, 104, 1, 104, 1, 104, 1, 105, 1, 105, 1, 105, 1, 105, 1, 105, 1, + 105, 1, 105, 1, 106, 1, 106, 1, 106, 1, 107, 1, 107, 1, 107, 1, 108, 1, + 108, 1, 108, 1, 108, 1, 108, 1, 108, 1, 109, 1, 109, 1, 109, 1, 109, 1, + 109, 1, 109, 1, 110, 1, 110, 1, 110, 1, 110, 1, 110, 1, 111, 1, 111, 1, + 111, 1, 111, 1, 111, 1, 111, 1, 111, 1, 112, 1, 112, 1, 112, 1, 112, 1, + 112, 1, 112, 1, 112, 1, 112, 1, 113, 1, 113, 1, 113, 1, 113, 1, 113, 1, + 113, 1, 114, 1, 114, 1, 114, 1, 114, 1, 114, 1, 114, 1, 115, 1, 115, 1, + 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 115, 1, 116, 1, + 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, 116, 1, + 116, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 117, 1, 118, 1, + 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 118, 1, 119, 1, 119, 1, + 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 119, 1, 120, 1, 120, 1, 120, 1, + 120, 1, 120, 1, 120, 1, 120, 1, 121, 1, 121, 1, 121, 1, 121, 1, 121, 1, + 121, 1, 121, 1, 121, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, 122, 1, + 122, 1, 122, 1, 122, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, 123, 1, + 123, 1, 123, 1, 123, 1, 123, 1, 124, 1, 124, 1, 124, 1, 124, 1, 124, 1, + 124, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, 125, 1, + 125, 1, 126, 1, 126, 1, 126, 1, 126, 1, 127, 1, 127, 1, 127, 1, 127, 1, + 127, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, 128, 1, + 128, 1, 128, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, 129, 1, + 130, 1, 130, 1, 130, 1, 130, 1, 131, 1, 131, 1, 131, 1, 131, 1, 131, 1, + 131, 1, 132, 1, 132, 1, 132, 1, 132, 1, 132, 1, 133, 1, 133, 1, 133, 1, + 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 133, 1, 134, 1, 134, 1, + 134, 1, 134, 1, 134, 1, 135, 1, 135, 1, 135, 1, 136, 1, 136, 1, 136, 1, + 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, 136, 1, + 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 137, 1, 138, 1, + 138, 1, 138, 1, 138, 1, 138, 1, 138, 1, 139, 1, 139, 1, 139, 1, 139, 1, + 139, 1, 139, 1, 139, 1, 140, 1, 140, 1, 140, 1, 140, 1, 140, 1, 140, 1, + 140, 1, 141, 1, 141, 1, 141, 1, 141, 1, 141, 1, 141, 1, 142, 1, 142, 1, + 142, 1, 142, 1, 142, 1, 142, 1, 142, 1, 143, 1, 143, 1, 143, 1, 143, 1, + 143, 1, 143, 1, 143, 1, 144, 1, 144, 1, 144, 1, 144, 1, 144, 1, 145, 1, + 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 145, 1, 146, 1, 146, 1, + 146, 1, 146, 1, 146, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, 147, 1, + 148, 1, 148, 1, 148, 1, 148, 1, 148, 1, 149, 1, 149, 1, 149, 1, 149, 1, + 149, 1, 149, 1, 149, 1, 149, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, + 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 150, 1, 151, 1, 151, 1, + 151, 1, 151, 1, 151, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, 152, 1, + 152, 1, 152, 1, 152, 1, 152, 1, 153, 1, 153, 1, 153, 1, 153, 1, 153, 1, + 153, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 1, 154, 
1, + 154, 1, 154, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, 155, 1, + 155, 1, 155, 1, 155, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, 156, 1, + 156, 1, 156, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, 157, 1, + 157, 1, 157, 1, 157, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, 158, 1, + 158, 1, 158, 1, 158, 1, 158, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, + 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 159, 1, 160, 1, 160, 1, 160, 1, + 160, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, 161, 1, + 161, 1, 161, 1, 161, 1, 162, 1, 162, 1, 162, 1, 162, 1, 162, 1, 163, 1, + 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, 163, 1, + 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 164, 1, 165, 1, 165, 1, 165, 1, + 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, 165, 1, + 165, 1, 166, 1, 166, 1, 166, 1, 166, 1, 166, 1, 167, 1, 167, 1, 167, 1, + 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 167, 1, 168, 1, + 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, 168, 1, + 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 169, 1, 170, 1, 170, 1, + 170, 1, 170, 1, 170, 1, 170, 1, 170, 1, 171, 1, 171, 1, 171, 1, 171, 1, + 171, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 1, 172, 1, 173, 1, 173, 1, + 173, 1, 173, 1, 173, 1, 173, 1, 173, 1, 174, 1, 174, 1, 174, 1, 174, 1, + 174, 1, 174, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 175, 1, 176, 1, + 176, 1, 176, 1, 176, 1, 176, 1, 177, 1, 177, 1, 177, 1, 177, 1, 177, 1, + 177, 1, 177, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, 178, 1, + 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 179, 1, 180, 1, + 180, 1, 180, 1, 180, 1, 180, 1, 181, 1, 181, 1, 181, 1, 181, 1, 181, 1, + 181, 1, 181, 1, 182, 1, 182, 1, 182, 1, 183, 1, 183, 1, 183, 1, 183, 1, + 183, 1, 183, 1, 183, 1, 183, 1, 184, 1, 184, 1, 184, 1, 184, 5, 184, 1562, + 8, 184, 10, 184, 12, 184, 1565, 9, 184, 1, 184, 1, 184, 1, 184, 1, 184, + 1, 184, 5, 184, 1572, 8, 184, 10, 184, 12, 184, 1575, 9, 184, 1, 184, 1, + 184, 1, 184, 5, 184, 1580, 8, 184, 10, 184, 12, 184, 1583, 9, 184, 1, 184, + 1, 184, 1, 184, 5, 184, 1588, 8, 184, 10, 184, 12, 184, 1591, 9, 184, 3, + 184, 1593, 8, 184, 1, 185, 4, 185, 1596, 8, 185, 11, 185, 12, 185, 1597, + 1, 185, 1, 185, 5, 185, 1602, 8, 185, 10, 185, 12, 185, 1605, 9, 185, 3, + 185, 1607, 8, 185, 1, 185, 1, 185, 4, 185, 1611, 8, 185, 11, 185, 12, 185, + 1612, 3, 185, 1615, 8, 185, 1, 185, 1, 185, 3, 185, 1619, 8, 185, 1, 185, + 4, 185, 1622, 8, 185, 11, 185, 12, 185, 1623, 3, 185, 1626, 8, 185, 1, + 185, 1, 185, 1, 185, 1, 185, 4, 185, 1632, 8, 185, 11, 185, 12, 185, 1633, + 3, 185, 1636, 8, 185, 1, 186, 1, 186, 5, 186, 1640, 8, 186, 10, 186, 12, + 186, 1643, 9, 186, 1, 186, 1, 186, 3, 186, 1647, 8, 186, 1, 187, 1, 187, + 1, 187, 1, 187, 5, 187, 1653, 8, 187, 10, 187, 12, 187, 1656, 9, 187, 1, + 187, 1, 187, 1, 188, 1, 188, 1, 188, 1, 189, 1, 189, 1, 189, 1, 189, 5, + 189, 1667, 8, 189, 10, 189, 12, 189, 1670, 9, 189, 1, 189, 3, 189, 1673, + 8, 189, 1, 189, 1, 189, 3, 189, 1677, 8, 189, 1, 189, 1, 189, 1, 190, 1, + 190, 1, 190, 1, 190, 5, 190, 1685, 8, 190, 10, 190, 12, 190, 1688, 9, 190, + 1, 190, 1, 190, 1, 190, 1, 190, 1, 190, 1, 191, 1, 191, 1, 191, 1, 191, + 1, 192, 1, 192, 1, 193, 1, 193, 1, 194, 1, 194, 1, 1686, 0, 195, 1, 1, + 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, + 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, + 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 29, 59, + 30, 61, 31, 63, 32, 65, 33, 67, 34, 69, 35, 71, 
36, 73, 37, 75, 38, 77, + 39, 79, 40, 81, 41, 83, 42, 85, 43, 87, 44, 89, 45, 91, 46, 93, 47, 95, + 48, 97, 49, 99, 50, 101, 51, 103, 52, 105, 53, 107, 54, 109, 55, 111, 56, + 113, 57, 115, 58, 117, 59, 119, 60, 121, 61, 123, 62, 125, 63, 127, 64, + 129, 65, 131, 66, 133, 67, 135, 68, 137, 69, 139, 70, 141, 71, 143, 72, + 145, 73, 147, 74, 149, 75, 151, 76, 153, 77, 155, 78, 157, 79, 159, 80, + 161, 81, 163, 82, 165, 83, 167, 84, 169, 85, 171, 86, 173, 87, 175, 88, + 177, 89, 179, 90, 181, 91, 183, 92, 185, 93, 187, 94, 189, 95, 191, 96, + 193, 97, 195, 98, 197, 99, 199, 100, 201, 101, 203, 102, 205, 103, 207, + 104, 209, 105, 211, 106, 213, 107, 215, 108, 217, 109, 219, 110, 221, 111, + 223, 112, 225, 113, 227, 114, 229, 115, 231, 116, 233, 117, 235, 118, 237, + 119, 239, 120, 241, 121, 243, 122, 245, 123, 247, 124, 249, 125, 251, 126, + 253, 127, 255, 128, 257, 129, 259, 130, 261, 131, 263, 132, 265, 133, 267, + 134, 269, 135, 271, 136, 273, 137, 275, 138, 277, 139, 279, 140, 281, 141, + 283, 142, 285, 143, 287, 144, 289, 145, 291, 146, 293, 147, 295, 148, 297, + 149, 299, 150, 301, 151, 303, 152, 305, 153, 307, 154, 309, 155, 311, 156, + 313, 157, 315, 158, 317, 159, 319, 160, 321, 161, 323, 162, 325, 163, 327, + 164, 329, 165, 331, 166, 333, 167, 335, 168, 337, 169, 339, 170, 341, 171, + 343, 172, 345, 173, 347, 174, 349, 175, 351, 176, 353, 177, 355, 178, 357, + 179, 359, 180, 361, 181, 363, 182, 365, 183, 367, 184, 369, 185, 371, 186, + 373, 187, 375, 188, 377, 189, 379, 190, 381, 191, 383, 192, 385, 193, 387, + 0, 389, 0, 1, 0, 38, 2, 0, 65, 65, 97, 97, 2, 0, 66, 66, 98, 98, 2, 0, + 79, 79, 111, 111, 2, 0, 82, 82, 114, 114, 2, 0, 84, 84, 116, 116, 2, 0, + 67, 67, 99, 99, 2, 0, 73, 73, 105, 105, 2, 0, 78, 78, 110, 110, 2, 0, 68, + 68, 100, 100, 2, 0, 70, 70, 102, 102, 2, 0, 69, 69, 101, 101, 2, 0, 76, + 76, 108, 108, 2, 0, 89, 89, 121, 121, 2, 0, 90, 90, 122, 122, 2, 0, 83, + 83, 115, 115, 2, 0, 72, 72, 104, 104, 2, 0, 85, 85, 117, 117, 2, 0, 77, + 77, 109, 109, 2, 0, 71, 71, 103, 103, 2, 0, 87, 87, 119, 119, 2, 0, 75, + 75, 107, 107, 2, 0, 80, 80, 112, 112, 2, 0, 88, 88, 120, 120, 2, 0, 86, + 86, 118, 118, 2, 0, 74, 74, 106, 106, 2, 0, 81, 81, 113, 113, 1, 0, 34, + 34, 1, 0, 96, 96, 1, 0, 93, 93, 3, 0, 65, 90, 95, 95, 97, 122, 4, 0, 48, + 57, 65, 90, 95, 95, 97, 122, 2, 0, 43, 43, 45, 45, 3, 0, 36, 36, 58, 58, + 64, 64, 1, 0, 39, 39, 2, 0, 10, 10, 13, 13, 3, 0, 9, 11, 13, 13, 32, 32, + 3, 0, 48, 57, 65, 70, 97, 102, 1, 0, 48, 57, 1728, 0, 1, 1, 0, 0, 0, 0, + 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, + 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, + 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, + 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, + 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, + 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, + 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, + 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0, 63, 1, 0, 0, 0, + 0, 65, 1, 0, 0, 0, 0, 67, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 0, 71, 1, 0, 0, + 0, 0, 73, 1, 0, 0, 0, 0, 75, 1, 0, 0, 0, 0, 77, 1, 0, 0, 0, 0, 79, 1, 0, + 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, + 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 0, 95, + 1, 0, 0, 0, 0, 97, 1, 0, 0, 0, 0, 99, 1, 0, 0, 0, 0, 101, 1, 0, 0, 0, 0, + 103, 1, 0, 0, 0, 0, 105, 1, 0, 0, 0, 0, 107, 1, 0, 0, 0, 0, 109, 1, 0, 
+ 0, 0, 0, 111, 1, 0, 0, 0, 0, 113, 1, 0, 0, 0, 0, 115, 1, 0, 0, 0, 0, 117, + 1, 0, 0, 0, 0, 119, 1, 0, 0, 0, 0, 121, 1, 0, 0, 0, 0, 123, 1, 0, 0, 0, + 0, 125, 1, 0, 0, 0, 0, 127, 1, 0, 0, 0, 0, 129, 1, 0, 0, 0, 0, 131, 1, + 0, 0, 0, 0, 133, 1, 0, 0, 0, 0, 135, 1, 0, 0, 0, 0, 137, 1, 0, 0, 0, 0, + 139, 1, 0, 0, 0, 0, 141, 1, 0, 0, 0, 0, 143, 1, 0, 0, 0, 0, 145, 1, 0, + 0, 0, 0, 147, 1, 0, 0, 0, 0, 149, 1, 0, 0, 0, 0, 151, 1, 0, 0, 0, 0, 153, + 1, 0, 0, 0, 0, 155, 1, 0, 0, 0, 0, 157, 1, 0, 0, 0, 0, 159, 1, 0, 0, 0, + 0, 161, 1, 0, 0, 0, 0, 163, 1, 0, 0, 0, 0, 165, 1, 0, 0, 0, 0, 167, 1, + 0, 0, 0, 0, 169, 1, 0, 0, 0, 0, 171, 1, 0, 0, 0, 0, 173, 1, 0, 0, 0, 0, + 175, 1, 0, 0, 0, 0, 177, 1, 0, 0, 0, 0, 179, 1, 0, 0, 0, 0, 181, 1, 0, + 0, 0, 0, 183, 1, 0, 0, 0, 0, 185, 1, 0, 0, 0, 0, 187, 1, 0, 0, 0, 0, 189, + 1, 0, 0, 0, 0, 191, 1, 0, 0, 0, 0, 193, 1, 0, 0, 0, 0, 195, 1, 0, 0, 0, + 0, 197, 1, 0, 0, 0, 0, 199, 1, 0, 0, 0, 0, 201, 1, 0, 0, 0, 0, 203, 1, + 0, 0, 0, 0, 205, 1, 0, 0, 0, 0, 207, 1, 0, 0, 0, 0, 209, 1, 0, 0, 0, 0, + 211, 1, 0, 0, 0, 0, 213, 1, 0, 0, 0, 0, 215, 1, 0, 0, 0, 0, 217, 1, 0, + 0, 0, 0, 219, 1, 0, 0, 0, 0, 221, 1, 0, 0, 0, 0, 223, 1, 0, 0, 0, 0, 225, + 1, 0, 0, 0, 0, 227, 1, 0, 0, 0, 0, 229, 1, 0, 0, 0, 0, 231, 1, 0, 0, 0, + 0, 233, 1, 0, 0, 0, 0, 235, 1, 0, 0, 0, 0, 237, 1, 0, 0, 0, 0, 239, 1, + 0, 0, 0, 0, 241, 1, 0, 0, 0, 0, 243, 1, 0, 0, 0, 0, 245, 1, 0, 0, 0, 0, + 247, 1, 0, 0, 0, 0, 249, 1, 0, 0, 0, 0, 251, 1, 0, 0, 0, 0, 253, 1, 0, + 0, 0, 0, 255, 1, 0, 0, 0, 0, 257, 1, 0, 0, 0, 0, 259, 1, 0, 0, 0, 0, 261, + 1, 0, 0, 0, 0, 263, 1, 0, 0, 0, 0, 265, 1, 0, 0, 0, 0, 267, 1, 0, 0, 0, + 0, 269, 1, 0, 0, 0, 0, 271, 1, 0, 0, 0, 0, 273, 1, 0, 0, 0, 0, 275, 1, + 0, 0, 0, 0, 277, 1, 0, 0, 0, 0, 279, 1, 0, 0, 0, 0, 281, 1, 0, 0, 0, 0, + 283, 1, 0, 0, 0, 0, 285, 1, 0, 0, 0, 0, 287, 1, 0, 0, 0, 0, 289, 1, 0, + 0, 0, 0, 291, 1, 0, 0, 0, 0, 293, 1, 0, 0, 0, 0, 295, 1, 0, 0, 0, 0, 297, + 1, 0, 0, 0, 0, 299, 1, 0, 0, 0, 0, 301, 1, 0, 0, 0, 0, 303, 1, 0, 0, 0, + 0, 305, 1, 0, 0, 0, 0, 307, 1, 0, 0, 0, 0, 309, 1, 0, 0, 0, 0, 311, 1, + 0, 0, 0, 0, 313, 1, 0, 0, 0, 0, 315, 1, 0, 0, 0, 0, 317, 1, 0, 0, 0, 0, + 319, 1, 0, 0, 0, 0, 321, 1, 0, 0, 0, 0, 323, 1, 0, 0, 0, 0, 325, 1, 0, + 0, 0, 0, 327, 1, 0, 0, 0, 0, 329, 1, 0, 0, 0, 0, 331, 1, 0, 0, 0, 0, 333, + 1, 0, 0, 0, 0, 335, 1, 0, 0, 0, 0, 337, 1, 0, 0, 0, 0, 339, 1, 0, 0, 0, + 0, 341, 1, 0, 0, 0, 0, 343, 1, 0, 0, 0, 0, 345, 1, 0, 0, 0, 0, 347, 1, + 0, 0, 0, 0, 349, 1, 0, 0, 0, 0, 351, 1, 0, 0, 0, 0, 353, 1, 0, 0, 0, 0, + 355, 1, 0, 0, 0, 0, 357, 1, 0, 0, 0, 0, 359, 1, 0, 0, 0, 0, 361, 1, 0, + 0, 0, 0, 363, 1, 0, 0, 0, 0, 365, 1, 0, 0, 0, 0, 367, 1, 0, 0, 0, 0, 369, + 1, 0, 0, 0, 0, 371, 1, 0, 0, 0, 0, 373, 1, 0, 0, 0, 0, 375, 1, 0, 0, 0, + 0, 377, 1, 0, 0, 0, 0, 379, 1, 0, 0, 0, 0, 381, 1, 0, 0, 0, 0, 383, 1, + 0, 0, 0, 0, 385, 1, 0, 0, 0, 1, 391, 1, 0, 0, 0, 3, 393, 1, 0, 0, 0, 5, + 395, 1, 0, 0, 0, 7, 397, 1, 0, 0, 0, 9, 399, 1, 0, 0, 0, 11, 401, 1, 0, + 0, 0, 13, 403, 1, 0, 0, 0, 15, 405, 1, 0, 0, 0, 17, 407, 1, 0, 0, 0, 19, + 409, 1, 0, 0, 0, 21, 411, 1, 0, 0, 0, 23, 414, 1, 0, 0, 0, 25, 416, 1, + 0, 0, 0, 27, 418, 1, 0, 0, 0, 29, 421, 1, 0, 0, 0, 31, 424, 1, 0, 0, 0, + 33, 426, 1, 0, 0, 0, 35, 428, 1, 0, 0, 0, 37, 430, 1, 0, 0, 0, 39, 433, + 1, 0, 0, 0, 41, 435, 1, 0, 0, 0, 43, 438, 1, 0, 0, 0, 45, 441, 1, 0, 0, + 0, 47, 444, 1, 0, 0, 0, 49, 447, 1, 0, 0, 0, 51, 453, 1, 0, 0, 0, 53, 460, + 1, 0, 0, 0, 55, 464, 1, 0, 0, 0, 57, 470, 1, 0, 0, 0, 59, 474, 1, 0, 0, + 0, 61, 480, 1, 0, 0, 0, 63, 488, 1, 0, 0, 0, 65, 492, 1, 0, 0, 0, 67, 
495, + 1, 0, 0, 0, 69, 499, 1, 0, 0, 0, 71, 506, 1, 0, 0, 0, 73, 520, 1, 0, 0, + 0, 75, 527, 1, 0, 0, 0, 77, 533, 1, 0, 0, 0, 79, 541, 1, 0, 0, 0, 81, 544, + 1, 0, 0, 0, 83, 552, 1, 0, 0, 0, 85, 557, 1, 0, 0, 0, 87, 562, 1, 0, 0, + 0, 89, 568, 1, 0, 0, 0, 91, 576, 1, 0, 0, 0, 93, 583, 1, 0, 0, 0, 95, 590, + 1, 0, 0, 0, 97, 599, 1, 0, 0, 0, 99, 610, 1, 0, 0, 0, 101, 617, 1, 0, 0, + 0, 103, 623, 1, 0, 0, 0, 105, 636, 1, 0, 0, 0, 107, 649, 1, 0, 0, 0, 109, + 667, 1, 0, 0, 0, 111, 676, 1, 0, 0, 0, 113, 684, 1, 0, 0, 0, 115, 695, + 1, 0, 0, 0, 117, 704, 1, 0, 0, 0, 119, 711, 1, 0, 0, 0, 121, 716, 1, 0, + 0, 0, 123, 723, 1, 0, 0, 0, 125, 732, 1, 0, 0, 0, 127, 737, 1, 0, 0, 0, + 129, 742, 1, 0, 0, 0, 131, 747, 1, 0, 0, 0, 133, 751, 1, 0, 0, 0, 135, + 758, 1, 0, 0, 0, 137, 765, 1, 0, 0, 0, 139, 775, 1, 0, 0, 0, 141, 782, + 1, 0, 0, 0, 143, 790, 1, 0, 0, 0, 145, 795, 1, 0, 0, 0, 147, 799, 1, 0, + 0, 0, 149, 807, 1, 0, 0, 0, 151, 812, 1, 0, 0, 0, 153, 817, 1, 0, 0, 0, + 155, 822, 1, 0, 0, 0, 157, 828, 1, 0, 0, 0, 159, 835, 1, 0, 0, 0, 161, + 838, 1, 0, 0, 0, 163, 845, 1, 0, 0, 0, 165, 855, 1, 0, 0, 0, 167, 858, + 1, 0, 0, 0, 169, 864, 1, 0, 0, 0, 171, 872, 1, 0, 0, 0, 173, 882, 1, 0, + 0, 0, 175, 888, 1, 0, 0, 0, 177, 895, 1, 0, 0, 0, 179, 903, 1, 0, 0, 0, + 181, 913, 1, 0, 0, 0, 183, 918, 1, 0, 0, 0, 185, 921, 1, 0, 0, 0, 187, + 928, 1, 0, 0, 0, 189, 933, 1, 0, 0, 0, 191, 937, 1, 0, 0, 0, 193, 942, + 1, 0, 0, 0, 195, 947, 1, 0, 0, 0, 197, 953, 1, 0, 0, 0, 199, 959, 1, 0, + 0, 0, 201, 967, 1, 0, 0, 0, 203, 970, 1, 0, 0, 0, 205, 974, 1, 0, 0, 0, + 207, 982, 1, 0, 0, 0, 209, 987, 1, 0, 0, 0, 211, 990, 1, 0, 0, 0, 213, + 997, 1, 0, 0, 0, 215, 1000, 1, 0, 0, 0, 217, 1003, 1, 0, 0, 0, 219, 1009, + 1, 0, 0, 0, 221, 1015, 1, 0, 0, 0, 223, 1020, 1, 0, 0, 0, 225, 1027, 1, + 0, 0, 0, 227, 1035, 1, 0, 0, 0, 229, 1041, 1, 0, 0, 0, 231, 1047, 1, 0, + 0, 0, 233, 1057, 1, 0, 0, 0, 235, 1068, 1, 0, 0, 0, 237, 1075, 1, 0, 0, + 0, 239, 1083, 1, 0, 0, 0, 241, 1091, 1, 0, 0, 0, 243, 1098, 1, 0, 0, 0, + 245, 1106, 1, 0, 0, 0, 247, 1115, 1, 0, 0, 0, 249, 1125, 1, 0, 0, 0, 251, + 1131, 1, 0, 0, 0, 253, 1140, 1, 0, 0, 0, 255, 1144, 1, 0, 0, 0, 257, 1149, + 1, 0, 0, 0, 259, 1159, 1, 0, 0, 0, 261, 1166, 1, 0, 0, 0, 263, 1170, 1, + 0, 0, 0, 265, 1176, 1, 0, 0, 0, 267, 1181, 1, 0, 0, 0, 269, 1191, 1, 0, + 0, 0, 271, 1196, 1, 0, 0, 0, 273, 1199, 1, 0, 0, 0, 275, 1211, 1, 0, 0, + 0, 277, 1219, 1, 0, 0, 0, 279, 1225, 1, 0, 0, 0, 281, 1232, 1, 0, 0, 0, + 283, 1239, 1, 0, 0, 0, 285, 1245, 1, 0, 0, 0, 287, 1252, 1, 0, 0, 0, 289, + 1259, 1, 0, 0, 0, 291, 1264, 1, 0, 0, 0, 293, 1272, 1, 0, 0, 0, 295, 1277, + 1, 0, 0, 0, 297, 1283, 1, 0, 0, 0, 299, 1288, 1, 0, 0, 0, 301, 1296, 1, + 0, 0, 0, 303, 1308, 1, 0, 0, 0, 305, 1313, 1, 0, 0, 0, 307, 1323, 1, 0, + 0, 0, 309, 1329, 1, 0, 0, 0, 311, 1339, 1, 0, 0, 0, 313, 1349, 1, 0, 0, + 0, 315, 1357, 1, 0, 0, 0, 317, 1367, 1, 0, 0, 0, 319, 1377, 1, 0, 0, 0, + 321, 1388, 1, 0, 0, 0, 323, 1392, 1, 0, 0, 0, 325, 1403, 1, 0, 0, 0, 327, + 1408, 1, 0, 0, 0, 329, 1418, 1, 0, 0, 0, 331, 1424, 1, 0, 0, 0, 333, 1437, + 1, 0, 0, 0, 335, 1442, 1, 0, 0, 0, 337, 1453, 1, 0, 0, 0, 339, 1463, 1, + 0, 0, 0, 341, 1470, 1, 0, 0, 0, 343, 1477, 1, 0, 0, 0, 345, 1482, 1, 0, + 0, 0, 347, 1488, 1, 0, 0, 0, 349, 1495, 1, 0, 0, 0, 351, 1501, 1, 0, 0, + 0, 353, 1507, 1, 0, 0, 0, 355, 1512, 1, 0, 0, 0, 357, 1519, 1, 0, 0, 0, + 359, 1526, 1, 0, 0, 0, 361, 1534, 1, 0, 0, 0, 363, 1539, 1, 0, 0, 0, 365, + 1546, 1, 0, 0, 0, 367, 1549, 1, 0, 0, 0, 369, 1592, 1, 0, 0, 0, 371, 1635, + 1, 0, 0, 0, 373, 1646, 1, 0, 0, 0, 375, 1648, 
1, 0, 0, 0, 377, 1659, 1, + 0, 0, 0, 379, 1662, 1, 0, 0, 0, 381, 1680, 1, 0, 0, 0, 383, 1694, 1, 0, + 0, 0, 385, 1698, 1, 0, 0, 0, 387, 1700, 1, 0, 0, 0, 389, 1702, 1, 0, 0, + 0, 391, 392, 5, 59, 0, 0, 392, 2, 1, 0, 0, 0, 393, 394, 5, 46, 0, 0, 394, + 4, 1, 0, 0, 0, 395, 396, 5, 40, 0, 0, 396, 6, 1, 0, 0, 0, 397, 398, 5, + 41, 0, 0, 398, 8, 1, 0, 0, 0, 399, 400, 5, 44, 0, 0, 400, 10, 1, 0, 0, + 0, 401, 402, 5, 61, 0, 0, 402, 12, 1, 0, 0, 0, 403, 404, 5, 42, 0, 0, 404, + 14, 1, 0, 0, 0, 405, 406, 5, 43, 0, 0, 406, 16, 1, 0, 0, 0, 407, 408, 5, + 45, 0, 0, 408, 18, 1, 0, 0, 0, 409, 410, 5, 126, 0, 0, 410, 20, 1, 0, 0, + 0, 411, 412, 5, 124, 0, 0, 412, 413, 5, 124, 0, 0, 413, 22, 1, 0, 0, 0, + 414, 415, 5, 47, 0, 0, 415, 24, 1, 0, 0, 0, 416, 417, 5, 37, 0, 0, 417, + 26, 1, 0, 0, 0, 418, 419, 5, 60, 0, 0, 419, 420, 5, 60, 0, 0, 420, 28, + 1, 0, 0, 0, 421, 422, 5, 62, 0, 0, 422, 423, 5, 62, 0, 0, 423, 30, 1, 0, + 0, 0, 424, 425, 5, 38, 0, 0, 425, 32, 1, 0, 0, 0, 426, 427, 5, 124, 0, + 0, 427, 34, 1, 0, 0, 0, 428, 429, 5, 60, 0, 0, 429, 36, 1, 0, 0, 0, 430, + 431, 5, 60, 0, 0, 431, 432, 5, 61, 0, 0, 432, 38, 1, 0, 0, 0, 433, 434, + 5, 62, 0, 0, 434, 40, 1, 0, 0, 0, 435, 436, 5, 62, 0, 0, 436, 437, 5, 61, + 0, 0, 437, 42, 1, 0, 0, 0, 438, 439, 5, 61, 0, 0, 439, 440, 5, 61, 0, 0, + 440, 44, 1, 0, 0, 0, 441, 442, 5, 33, 0, 0, 442, 443, 5, 61, 0, 0, 443, + 46, 1, 0, 0, 0, 444, 445, 5, 60, 0, 0, 445, 446, 5, 62, 0, 0, 446, 48, + 1, 0, 0, 0, 447, 448, 7, 0, 0, 0, 448, 449, 7, 1, 0, 0, 449, 450, 7, 2, + 0, 0, 450, 451, 7, 3, 0, 0, 451, 452, 7, 4, 0, 0, 452, 50, 1, 0, 0, 0, + 453, 454, 7, 0, 0, 0, 454, 455, 7, 5, 0, 0, 455, 456, 7, 4, 0, 0, 456, + 457, 7, 6, 0, 0, 457, 458, 7, 2, 0, 0, 458, 459, 7, 7, 0, 0, 459, 52, 1, + 0, 0, 0, 460, 461, 7, 0, 0, 0, 461, 462, 7, 8, 0, 0, 462, 463, 7, 8, 0, + 0, 463, 54, 1, 0, 0, 0, 464, 465, 7, 0, 0, 0, 465, 466, 7, 9, 0, 0, 466, + 467, 7, 4, 0, 0, 467, 468, 7, 10, 0, 0, 468, 469, 7, 3, 0, 0, 469, 56, + 1, 0, 0, 0, 470, 471, 7, 0, 0, 0, 471, 472, 7, 11, 0, 0, 472, 473, 7, 11, + 0, 0, 473, 58, 1, 0, 0, 0, 474, 475, 7, 0, 0, 0, 475, 476, 7, 11, 0, 0, + 476, 477, 7, 4, 0, 0, 477, 478, 7, 10, 0, 0, 478, 479, 7, 3, 0, 0, 479, + 60, 1, 0, 0, 0, 480, 481, 7, 0, 0, 0, 481, 482, 7, 7, 0, 0, 482, 483, 7, + 0, 0, 0, 483, 484, 7, 11, 0, 0, 484, 485, 7, 12, 0, 0, 485, 486, 7, 13, + 0, 0, 486, 487, 7, 10, 0, 0, 487, 62, 1, 0, 0, 0, 488, 489, 7, 0, 0, 0, + 489, 490, 7, 7, 0, 0, 490, 491, 7, 8, 0, 0, 491, 64, 1, 0, 0, 0, 492, 493, + 7, 0, 0, 0, 493, 494, 7, 14, 0, 0, 494, 66, 1, 0, 0, 0, 495, 496, 7, 0, + 0, 0, 496, 497, 7, 14, 0, 0, 497, 498, 7, 5, 0, 0, 498, 68, 1, 0, 0, 0, + 499, 500, 7, 0, 0, 0, 500, 501, 7, 4, 0, 0, 501, 502, 7, 4, 0, 0, 502, + 503, 7, 0, 0, 0, 503, 504, 7, 5, 0, 0, 504, 505, 7, 15, 0, 0, 505, 70, + 1, 0, 0, 0, 506, 507, 7, 0, 0, 0, 507, 508, 7, 16, 0, 0, 508, 509, 7, 4, + 0, 0, 509, 510, 7, 2, 0, 0, 510, 511, 7, 6, 0, 0, 511, 512, 7, 7, 0, 0, + 512, 513, 7, 5, 0, 0, 513, 514, 7, 3, 0, 0, 514, 515, 7, 10, 0, 0, 515, + 516, 7, 17, 0, 0, 516, 517, 7, 10, 0, 0, 517, 518, 7, 7, 0, 0, 518, 519, + 7, 4, 0, 0, 519, 72, 1, 0, 0, 0, 520, 521, 7, 1, 0, 0, 521, 522, 7, 10, + 0, 0, 522, 523, 7, 9, 0, 0, 523, 524, 7, 2, 0, 0, 524, 525, 7, 3, 0, 0, + 525, 526, 7, 10, 0, 0, 526, 74, 1, 0, 0, 0, 527, 528, 7, 1, 0, 0, 528, + 529, 7, 10, 0, 0, 529, 530, 7, 18, 0, 0, 530, 531, 7, 6, 0, 0, 531, 532, + 7, 7, 0, 0, 532, 76, 1, 0, 0, 0, 533, 534, 7, 1, 0, 0, 534, 535, 7, 10, + 0, 0, 535, 536, 7, 4, 0, 0, 536, 537, 7, 19, 0, 0, 537, 538, 7, 10, 0, + 0, 538, 539, 7, 10, 0, 0, 539, 540, 
7, 7, 0, 0, 540, 78, 1, 0, 0, 0, 541, + 542, 7, 1, 0, 0, 542, 543, 7, 12, 0, 0, 543, 80, 1, 0, 0, 0, 544, 545, + 7, 5, 0, 0, 545, 546, 7, 0, 0, 0, 546, 547, 7, 14, 0, 0, 547, 548, 7, 5, + 0, 0, 548, 549, 7, 0, 0, 0, 549, 550, 7, 8, 0, 0, 550, 551, 7, 10, 0, 0, + 551, 82, 1, 0, 0, 0, 552, 553, 7, 5, 0, 0, 553, 554, 7, 0, 0, 0, 554, 555, + 7, 14, 0, 0, 555, 556, 7, 10, 0, 0, 556, 84, 1, 0, 0, 0, 557, 558, 7, 5, + 0, 0, 558, 559, 7, 0, 0, 0, 559, 560, 7, 14, 0, 0, 560, 561, 7, 4, 0, 0, + 561, 86, 1, 0, 0, 0, 562, 563, 7, 5, 0, 0, 563, 564, 7, 15, 0, 0, 564, + 565, 7, 10, 0, 0, 565, 566, 7, 5, 0, 0, 566, 567, 7, 20, 0, 0, 567, 88, + 1, 0, 0, 0, 568, 569, 7, 5, 0, 0, 569, 570, 7, 2, 0, 0, 570, 571, 7, 11, + 0, 0, 571, 572, 7, 11, 0, 0, 572, 573, 7, 0, 0, 0, 573, 574, 7, 4, 0, 0, + 574, 575, 7, 10, 0, 0, 575, 90, 1, 0, 0, 0, 576, 577, 7, 5, 0, 0, 577, + 578, 7, 2, 0, 0, 578, 579, 7, 11, 0, 0, 579, 580, 7, 16, 0, 0, 580, 581, + 7, 17, 0, 0, 581, 582, 7, 7, 0, 0, 582, 92, 1, 0, 0, 0, 583, 584, 7, 5, + 0, 0, 584, 585, 7, 2, 0, 0, 585, 586, 7, 17, 0, 0, 586, 587, 7, 17, 0, + 0, 587, 588, 7, 6, 0, 0, 588, 589, 7, 4, 0, 0, 589, 94, 1, 0, 0, 0, 590, + 591, 7, 5, 0, 0, 591, 592, 7, 2, 0, 0, 592, 593, 7, 7, 0, 0, 593, 594, + 7, 9, 0, 0, 594, 595, 7, 11, 0, 0, 595, 596, 7, 6, 0, 0, 596, 597, 7, 5, + 0, 0, 597, 598, 7, 4, 0, 0, 598, 96, 1, 0, 0, 0, 599, 600, 7, 5, 0, 0, + 600, 601, 7, 2, 0, 0, 601, 602, 7, 7, 0, 0, 602, 603, 7, 14, 0, 0, 603, + 604, 7, 4, 0, 0, 604, 605, 7, 3, 0, 0, 605, 606, 7, 0, 0, 0, 606, 607, + 7, 6, 0, 0, 607, 608, 7, 7, 0, 0, 608, 609, 7, 4, 0, 0, 609, 98, 1, 0, + 0, 0, 610, 611, 7, 5, 0, 0, 611, 612, 7, 3, 0, 0, 612, 613, 7, 10, 0, 0, + 613, 614, 7, 0, 0, 0, 614, 615, 7, 4, 0, 0, 615, 616, 7, 10, 0, 0, 616, + 100, 1, 0, 0, 0, 617, 618, 7, 5, 0, 0, 618, 619, 7, 3, 0, 0, 619, 620, + 7, 2, 0, 0, 620, 621, 7, 14, 0, 0, 621, 622, 7, 14, 0, 0, 622, 102, 1, + 0, 0, 0, 623, 624, 7, 5, 0, 0, 624, 625, 7, 16, 0, 0, 625, 626, 7, 3, 0, + 0, 626, 627, 7, 3, 0, 0, 627, 628, 7, 10, 0, 0, 628, 629, 7, 7, 0, 0, 629, + 630, 7, 4, 0, 0, 630, 631, 5, 95, 0, 0, 631, 632, 7, 8, 0, 0, 632, 633, + 7, 0, 0, 0, 633, 634, 7, 4, 0, 0, 634, 635, 7, 10, 0, 0, 635, 104, 1, 0, + 0, 0, 636, 637, 7, 5, 0, 0, 637, 638, 7, 16, 0, 0, 638, 639, 7, 3, 0, 0, + 639, 640, 7, 3, 0, 0, 640, 641, 7, 10, 0, 0, 641, 642, 7, 7, 0, 0, 642, + 643, 7, 4, 0, 0, 643, 644, 5, 95, 0, 0, 644, 645, 7, 4, 0, 0, 645, 646, + 7, 6, 0, 0, 646, 647, 7, 17, 0, 0, 647, 648, 7, 10, 0, 0, 648, 106, 1, + 0, 0, 0, 649, 650, 7, 5, 0, 0, 650, 651, 7, 16, 0, 0, 651, 652, 7, 3, 0, + 0, 652, 653, 7, 3, 0, 0, 653, 654, 7, 10, 0, 0, 654, 655, 7, 7, 0, 0, 655, + 656, 7, 4, 0, 0, 656, 657, 5, 95, 0, 0, 657, 658, 7, 4, 0, 0, 658, 659, + 7, 6, 0, 0, 659, 660, 7, 17, 0, 0, 660, 661, 7, 10, 0, 0, 661, 662, 7, + 14, 0, 0, 662, 663, 7, 4, 0, 0, 663, 664, 7, 0, 0, 0, 664, 665, 7, 17, + 0, 0, 665, 666, 7, 21, 0, 0, 666, 108, 1, 0, 0, 0, 667, 668, 7, 8, 0, 0, + 668, 669, 7, 0, 0, 0, 669, 670, 7, 4, 0, 0, 670, 671, 7, 0, 0, 0, 671, + 672, 7, 1, 0, 0, 672, 673, 7, 0, 0, 0, 673, 674, 7, 14, 0, 0, 674, 675, + 7, 10, 0, 0, 675, 110, 1, 0, 0, 0, 676, 677, 7, 8, 0, 0, 677, 678, 7, 10, + 0, 0, 678, 679, 7, 9, 0, 0, 679, 680, 7, 0, 0, 0, 680, 681, 7, 16, 0, 0, + 681, 682, 7, 11, 0, 0, 682, 683, 7, 4, 0, 0, 683, 112, 1, 0, 0, 0, 684, + 685, 7, 8, 0, 0, 685, 686, 7, 10, 0, 0, 686, 687, 7, 9, 0, 0, 687, 688, + 7, 10, 0, 0, 688, 689, 7, 3, 0, 0, 689, 690, 7, 3, 0, 0, 690, 691, 7, 0, + 0, 0, 691, 692, 7, 1, 0, 0, 692, 693, 7, 11, 0, 0, 693, 694, 7, 10, 0, + 0, 694, 114, 1, 0, 0, 
0, 695, 696, 7, 8, 0, 0, 696, 697, 7, 10, 0, 0, 697, + 698, 7, 9, 0, 0, 698, 699, 7, 10, 0, 0, 699, 700, 7, 3, 0, 0, 700, 701, + 7, 3, 0, 0, 701, 702, 7, 10, 0, 0, 702, 703, 7, 8, 0, 0, 703, 116, 1, 0, + 0, 0, 704, 705, 7, 8, 0, 0, 705, 706, 7, 10, 0, 0, 706, 707, 7, 11, 0, + 0, 707, 708, 7, 10, 0, 0, 708, 709, 7, 4, 0, 0, 709, 710, 7, 10, 0, 0, + 710, 118, 1, 0, 0, 0, 711, 712, 7, 8, 0, 0, 712, 713, 7, 10, 0, 0, 713, + 714, 7, 14, 0, 0, 714, 715, 7, 5, 0, 0, 715, 120, 1, 0, 0, 0, 716, 717, + 7, 8, 0, 0, 717, 718, 7, 10, 0, 0, 718, 719, 7, 4, 0, 0, 719, 720, 7, 0, + 0, 0, 720, 721, 7, 5, 0, 0, 721, 722, 7, 15, 0, 0, 722, 122, 1, 0, 0, 0, + 723, 724, 7, 8, 0, 0, 724, 725, 7, 6, 0, 0, 725, 726, 7, 14, 0, 0, 726, + 727, 7, 4, 0, 0, 727, 728, 7, 6, 0, 0, 728, 729, 7, 7, 0, 0, 729, 730, + 7, 5, 0, 0, 730, 731, 7, 4, 0, 0, 731, 124, 1, 0, 0, 0, 732, 733, 7, 8, + 0, 0, 733, 734, 7, 3, 0, 0, 734, 735, 7, 2, 0, 0, 735, 736, 7, 21, 0, 0, + 736, 126, 1, 0, 0, 0, 737, 738, 7, 10, 0, 0, 738, 739, 7, 0, 0, 0, 739, + 740, 7, 5, 0, 0, 740, 741, 7, 15, 0, 0, 741, 128, 1, 0, 0, 0, 742, 743, + 7, 10, 0, 0, 743, 744, 7, 11, 0, 0, 744, 745, 7, 14, 0, 0, 745, 746, 7, + 10, 0, 0, 746, 130, 1, 0, 0, 0, 747, 748, 7, 10, 0, 0, 748, 749, 7, 7, + 0, 0, 749, 750, 7, 8, 0, 0, 750, 132, 1, 0, 0, 0, 751, 752, 7, 10, 0, 0, + 752, 753, 7, 14, 0, 0, 753, 754, 7, 5, 0, 0, 754, 755, 7, 0, 0, 0, 755, + 756, 7, 21, 0, 0, 756, 757, 7, 10, 0, 0, 757, 134, 1, 0, 0, 0, 758, 759, + 7, 10, 0, 0, 759, 760, 7, 22, 0, 0, 760, 761, 7, 5, 0, 0, 761, 762, 7, + 10, 0, 0, 762, 763, 7, 21, 0, 0, 763, 764, 7, 4, 0, 0, 764, 136, 1, 0, + 0, 0, 765, 766, 7, 10, 0, 0, 766, 767, 7, 22, 0, 0, 767, 768, 7, 5, 0, + 0, 768, 769, 7, 11, 0, 0, 769, 770, 7, 16, 0, 0, 770, 771, 7, 14, 0, 0, + 771, 772, 7, 6, 0, 0, 772, 773, 7, 23, 0, 0, 773, 774, 7, 10, 0, 0, 774, + 138, 1, 0, 0, 0, 775, 776, 7, 10, 0, 0, 776, 777, 7, 22, 0, 0, 777, 778, + 7, 6, 0, 0, 778, 779, 7, 14, 0, 0, 779, 780, 7, 4, 0, 0, 780, 781, 7, 14, + 0, 0, 781, 140, 1, 0, 0, 0, 782, 783, 7, 10, 0, 0, 783, 784, 7, 22, 0, + 0, 784, 785, 7, 21, 0, 0, 785, 786, 7, 11, 0, 0, 786, 787, 7, 0, 0, 0, + 787, 788, 7, 6, 0, 0, 788, 789, 7, 7, 0, 0, 789, 142, 1, 0, 0, 0, 790, + 791, 7, 9, 0, 0, 791, 792, 7, 0, 0, 0, 792, 793, 7, 6, 0, 0, 793, 794, + 7, 11, 0, 0, 794, 144, 1, 0, 0, 0, 795, 796, 7, 9, 0, 0, 796, 797, 7, 2, + 0, 0, 797, 798, 7, 3, 0, 0, 798, 146, 1, 0, 0, 0, 799, 800, 7, 9, 0, 0, + 800, 801, 7, 2, 0, 0, 801, 802, 7, 3, 0, 0, 802, 803, 7, 10, 0, 0, 803, + 804, 7, 6, 0, 0, 804, 805, 7, 18, 0, 0, 805, 806, 7, 7, 0, 0, 806, 148, + 1, 0, 0, 0, 807, 808, 7, 9, 0, 0, 808, 809, 7, 3, 0, 0, 809, 810, 7, 2, + 0, 0, 810, 811, 7, 17, 0, 0, 811, 150, 1, 0, 0, 0, 812, 813, 7, 9, 0, 0, + 813, 814, 7, 16, 0, 0, 814, 815, 7, 11, 0, 0, 815, 816, 7, 11, 0, 0, 816, + 152, 1, 0, 0, 0, 817, 818, 7, 18, 0, 0, 818, 819, 7, 11, 0, 0, 819, 820, + 7, 2, 0, 0, 820, 821, 7, 1, 0, 0, 821, 154, 1, 0, 0, 0, 822, 823, 7, 18, + 0, 0, 823, 824, 7, 3, 0, 0, 824, 825, 7, 2, 0, 0, 825, 826, 7, 16, 0, 0, + 826, 827, 7, 21, 0, 0, 827, 156, 1, 0, 0, 0, 828, 829, 7, 15, 0, 0, 829, + 830, 7, 0, 0, 0, 830, 831, 7, 23, 0, 0, 831, 832, 7, 6, 0, 0, 832, 833, + 7, 7, 0, 0, 833, 834, 7, 18, 0, 0, 834, 158, 1, 0, 0, 0, 835, 836, 7, 6, + 0, 0, 836, 837, 7, 9, 0, 0, 837, 160, 1, 0, 0, 0, 838, 839, 7, 6, 0, 0, + 839, 840, 7, 18, 0, 0, 840, 841, 7, 7, 0, 0, 841, 842, 7, 2, 0, 0, 842, + 843, 7, 3, 0, 0, 843, 844, 7, 10, 0, 0, 844, 162, 1, 0, 0, 0, 845, 846, + 7, 6, 0, 0, 846, 847, 7, 17, 0, 0, 847, 848, 7, 17, 0, 0, 848, 849, 7, + 10, 0, 0, 849, 
850, 7, 8, 0, 0, 850, 851, 7, 6, 0, 0, 851, 852, 7, 0, 0, + 0, 852, 853, 7, 4, 0, 0, 853, 854, 7, 10, 0, 0, 854, 164, 1, 0, 0, 0, 855, + 856, 7, 6, 0, 0, 856, 857, 7, 7, 0, 0, 857, 166, 1, 0, 0, 0, 858, 859, + 7, 6, 0, 0, 859, 860, 7, 7, 0, 0, 860, 861, 7, 8, 0, 0, 861, 862, 7, 10, + 0, 0, 862, 863, 7, 22, 0, 0, 863, 168, 1, 0, 0, 0, 864, 865, 7, 6, 0, 0, + 865, 866, 7, 7, 0, 0, 866, 867, 7, 8, 0, 0, 867, 868, 7, 10, 0, 0, 868, + 869, 7, 22, 0, 0, 869, 870, 7, 10, 0, 0, 870, 871, 7, 8, 0, 0, 871, 170, + 1, 0, 0, 0, 872, 873, 7, 6, 0, 0, 873, 874, 7, 7, 0, 0, 874, 875, 7, 6, + 0, 0, 875, 876, 7, 4, 0, 0, 876, 877, 7, 6, 0, 0, 877, 878, 7, 0, 0, 0, + 878, 879, 7, 11, 0, 0, 879, 880, 7, 11, 0, 0, 880, 881, 7, 12, 0, 0, 881, + 172, 1, 0, 0, 0, 882, 883, 7, 6, 0, 0, 883, 884, 7, 7, 0, 0, 884, 885, + 7, 7, 0, 0, 885, 886, 7, 10, 0, 0, 886, 887, 7, 3, 0, 0, 887, 174, 1, 0, + 0, 0, 888, 889, 7, 6, 0, 0, 889, 890, 7, 7, 0, 0, 890, 891, 7, 14, 0, 0, + 891, 892, 7, 10, 0, 0, 892, 893, 7, 3, 0, 0, 893, 894, 7, 4, 0, 0, 894, + 176, 1, 0, 0, 0, 895, 896, 7, 6, 0, 0, 896, 897, 7, 7, 0, 0, 897, 898, + 7, 14, 0, 0, 898, 899, 7, 4, 0, 0, 899, 900, 7, 10, 0, 0, 900, 901, 7, + 0, 0, 0, 901, 902, 7, 8, 0, 0, 902, 178, 1, 0, 0, 0, 903, 904, 7, 6, 0, + 0, 904, 905, 7, 7, 0, 0, 905, 906, 7, 4, 0, 0, 906, 907, 7, 10, 0, 0, 907, + 908, 7, 3, 0, 0, 908, 909, 7, 14, 0, 0, 909, 910, 7, 10, 0, 0, 910, 911, + 7, 5, 0, 0, 911, 912, 7, 4, 0, 0, 912, 180, 1, 0, 0, 0, 913, 914, 7, 6, + 0, 0, 914, 915, 7, 7, 0, 0, 915, 916, 7, 4, 0, 0, 916, 917, 7, 2, 0, 0, + 917, 182, 1, 0, 0, 0, 918, 919, 7, 6, 0, 0, 919, 920, 7, 14, 0, 0, 920, + 184, 1, 0, 0, 0, 921, 922, 7, 6, 0, 0, 922, 923, 7, 14, 0, 0, 923, 924, + 7, 7, 0, 0, 924, 925, 7, 16, 0, 0, 925, 926, 7, 11, 0, 0, 926, 927, 7, + 11, 0, 0, 927, 186, 1, 0, 0, 0, 928, 929, 7, 24, 0, 0, 929, 930, 7, 2, + 0, 0, 930, 931, 7, 6, 0, 0, 931, 932, 7, 7, 0, 0, 932, 188, 1, 0, 0, 0, + 933, 934, 7, 20, 0, 0, 934, 935, 7, 10, 0, 0, 935, 936, 7, 12, 0, 0, 936, + 190, 1, 0, 0, 0, 937, 938, 7, 11, 0, 0, 938, 939, 7, 10, 0, 0, 939, 940, + 7, 9, 0, 0, 940, 941, 7, 4, 0, 0, 941, 192, 1, 0, 0, 0, 942, 943, 7, 11, + 0, 0, 943, 944, 7, 6, 0, 0, 944, 945, 7, 20, 0, 0, 945, 946, 7, 10, 0, + 0, 946, 194, 1, 0, 0, 0, 947, 948, 7, 11, 0, 0, 948, 949, 7, 6, 0, 0, 949, + 950, 7, 17, 0, 0, 950, 951, 7, 6, 0, 0, 951, 952, 7, 4, 0, 0, 952, 196, + 1, 0, 0, 0, 953, 954, 7, 17, 0, 0, 954, 955, 7, 0, 0, 0, 955, 956, 7, 4, + 0, 0, 956, 957, 7, 5, 0, 0, 957, 958, 7, 15, 0, 0, 958, 198, 1, 0, 0, 0, + 959, 960, 7, 7, 0, 0, 960, 961, 7, 0, 0, 0, 961, 962, 7, 4, 0, 0, 962, + 963, 7, 16, 0, 0, 963, 964, 7, 3, 0, 0, 964, 965, 7, 0, 0, 0, 965, 966, + 7, 11, 0, 0, 966, 200, 1, 0, 0, 0, 967, 968, 7, 7, 0, 0, 968, 969, 7, 2, + 0, 0, 969, 202, 1, 0, 0, 0, 970, 971, 7, 7, 0, 0, 971, 972, 7, 2, 0, 0, + 972, 973, 7, 4, 0, 0, 973, 204, 1, 0, 0, 0, 974, 975, 7, 7, 0, 0, 975, + 976, 7, 2, 0, 0, 976, 977, 7, 4, 0, 0, 977, 978, 7, 7, 0, 0, 978, 979, + 7, 16, 0, 0, 979, 980, 7, 11, 0, 0, 980, 981, 7, 11, 0, 0, 981, 206, 1, + 0, 0, 0, 982, 983, 7, 7, 0, 0, 983, 984, 7, 16, 0, 0, 984, 985, 7, 11, + 0, 0, 985, 986, 7, 11, 0, 0, 986, 208, 1, 0, 0, 0, 987, 988, 7, 2, 0, 0, + 988, 989, 7, 9, 0, 0, 989, 210, 1, 0, 0, 0, 990, 991, 7, 2, 0, 0, 991, + 992, 7, 9, 0, 0, 992, 993, 7, 9, 0, 0, 993, 994, 7, 14, 0, 0, 994, 995, + 7, 10, 0, 0, 995, 996, 7, 4, 0, 0, 996, 212, 1, 0, 0, 0, 997, 998, 7, 2, + 0, 0, 998, 999, 7, 7, 0, 0, 999, 214, 1, 0, 0, 0, 1000, 1001, 7, 2, 0, + 0, 1001, 1002, 7, 3, 0, 0, 1002, 216, 1, 0, 0, 0, 1003, 1004, 7, 2, 0, + 0, 
1004, 1005, 7, 3, 0, 0, 1005, 1006, 7, 8, 0, 0, 1006, 1007, 7, 10, 0, + 0, 1007, 1008, 7, 3, 0, 0, 1008, 218, 1, 0, 0, 0, 1009, 1010, 7, 2, 0, + 0, 1010, 1011, 7, 16, 0, 0, 1011, 1012, 7, 4, 0, 0, 1012, 1013, 7, 10, + 0, 0, 1013, 1014, 7, 3, 0, 0, 1014, 220, 1, 0, 0, 0, 1015, 1016, 7, 21, + 0, 0, 1016, 1017, 7, 11, 0, 0, 1017, 1018, 7, 0, 0, 0, 1018, 1019, 7, 7, + 0, 0, 1019, 222, 1, 0, 0, 0, 1020, 1021, 7, 21, 0, 0, 1021, 1022, 7, 3, + 0, 0, 1022, 1023, 7, 0, 0, 0, 1023, 1024, 7, 18, 0, 0, 1024, 1025, 7, 17, + 0, 0, 1025, 1026, 7, 0, 0, 0, 1026, 224, 1, 0, 0, 0, 1027, 1028, 7, 21, + 0, 0, 1028, 1029, 7, 3, 0, 0, 1029, 1030, 7, 6, 0, 0, 1030, 1031, 7, 17, + 0, 0, 1031, 1032, 7, 0, 0, 0, 1032, 1033, 7, 3, 0, 0, 1033, 1034, 7, 12, + 0, 0, 1034, 226, 1, 0, 0, 0, 1035, 1036, 7, 25, 0, 0, 1036, 1037, 7, 16, + 0, 0, 1037, 1038, 7, 10, 0, 0, 1038, 1039, 7, 3, 0, 0, 1039, 1040, 7, 12, + 0, 0, 1040, 228, 1, 0, 0, 0, 1041, 1042, 7, 3, 0, 0, 1042, 1043, 7, 0, + 0, 0, 1043, 1044, 7, 6, 0, 0, 1044, 1045, 7, 14, 0, 0, 1045, 1046, 7, 10, + 0, 0, 1046, 230, 1, 0, 0, 0, 1047, 1048, 7, 3, 0, 0, 1048, 1049, 7, 10, + 0, 0, 1049, 1050, 7, 5, 0, 0, 1050, 1051, 7, 16, 0, 0, 1051, 1052, 7, 3, + 0, 0, 1052, 1053, 7, 14, 0, 0, 1053, 1054, 7, 6, 0, 0, 1054, 1055, 7, 23, + 0, 0, 1055, 1056, 7, 10, 0, 0, 1056, 232, 1, 0, 0, 0, 1057, 1058, 7, 3, + 0, 0, 1058, 1059, 7, 10, 0, 0, 1059, 1060, 7, 9, 0, 0, 1060, 1061, 7, 10, + 0, 0, 1061, 1062, 7, 3, 0, 0, 1062, 1063, 7, 10, 0, 0, 1063, 1064, 7, 7, + 0, 0, 1064, 1065, 7, 5, 0, 0, 1065, 1066, 7, 10, 0, 0, 1066, 1067, 7, 14, + 0, 0, 1067, 234, 1, 0, 0, 0, 1068, 1069, 7, 3, 0, 0, 1069, 1070, 7, 10, + 0, 0, 1070, 1071, 7, 18, 0, 0, 1071, 1072, 7, 10, 0, 0, 1072, 1073, 7, + 22, 0, 0, 1073, 1074, 7, 21, 0, 0, 1074, 236, 1, 0, 0, 0, 1075, 1076, 7, + 3, 0, 0, 1076, 1077, 7, 10, 0, 0, 1077, 1078, 7, 6, 0, 0, 1078, 1079, 7, + 7, 0, 0, 1079, 1080, 7, 8, 0, 0, 1080, 1081, 7, 10, 0, 0, 1081, 1082, 7, + 22, 0, 0, 1082, 238, 1, 0, 0, 0, 1083, 1084, 7, 3, 0, 0, 1084, 1085, 7, + 10, 0, 0, 1085, 1086, 7, 11, 0, 0, 1086, 1087, 7, 10, 0, 0, 1087, 1088, + 7, 0, 0, 0, 1088, 1089, 7, 14, 0, 0, 1089, 1090, 7, 10, 0, 0, 1090, 240, + 1, 0, 0, 0, 1091, 1092, 7, 3, 0, 0, 1092, 1093, 7, 10, 0, 0, 1093, 1094, + 7, 7, 0, 0, 1094, 1095, 7, 0, 0, 0, 1095, 1096, 7, 17, 0, 0, 1096, 1097, + 7, 10, 0, 0, 1097, 242, 1, 0, 0, 0, 1098, 1099, 7, 3, 0, 0, 1099, 1100, + 7, 10, 0, 0, 1100, 1101, 7, 21, 0, 0, 1101, 1102, 7, 11, 0, 0, 1102, 1103, + 7, 0, 0, 0, 1103, 1104, 7, 5, 0, 0, 1104, 1105, 7, 10, 0, 0, 1105, 244, + 1, 0, 0, 0, 1106, 1107, 7, 3, 0, 0, 1107, 1108, 7, 10, 0, 0, 1108, 1109, + 7, 14, 0, 0, 1109, 1110, 7, 4, 0, 0, 1110, 1111, 7, 3, 0, 0, 1111, 1112, + 7, 6, 0, 0, 1112, 1113, 7, 5, 0, 0, 1113, 1114, 7, 4, 0, 0, 1114, 246, + 1, 0, 0, 0, 1115, 1116, 7, 3, 0, 0, 1116, 1117, 7, 10, 0, 0, 1117, 1118, + 7, 4, 0, 0, 1118, 1119, 7, 16, 0, 0, 1119, 1120, 7, 3, 0, 0, 1120, 1121, + 7, 7, 0, 0, 1121, 1122, 7, 6, 0, 0, 1122, 1123, 7, 7, 0, 0, 1123, 1124, + 7, 18, 0, 0, 1124, 248, 1, 0, 0, 0, 1125, 1126, 7, 3, 0, 0, 1126, 1127, + 7, 6, 0, 0, 1127, 1128, 7, 18, 0, 0, 1128, 1129, 7, 15, 0, 0, 1129, 1130, + 7, 4, 0, 0, 1130, 250, 1, 0, 0, 0, 1131, 1132, 7, 3, 0, 0, 1132, 1133, + 7, 2, 0, 0, 1133, 1134, 7, 11, 0, 0, 1134, 1135, 7, 11, 0, 0, 1135, 1136, + 7, 1, 0, 0, 1136, 1137, 7, 0, 0, 0, 1137, 1138, 7, 5, 0, 0, 1138, 1139, + 7, 20, 0, 0, 1139, 252, 1, 0, 0, 0, 1140, 1141, 7, 3, 0, 0, 1141, 1142, + 7, 2, 0, 0, 1142, 1143, 7, 19, 0, 0, 1143, 254, 1, 0, 0, 0, 1144, 1145, + 7, 3, 0, 0, 1145, 1146, 7, 2, 0, 0, 1146, 1147, 
7, 19, 0, 0, 1147, 1148, + 7, 14, 0, 0, 1148, 256, 1, 0, 0, 0, 1149, 1150, 7, 14, 0, 0, 1150, 1151, + 7, 0, 0, 0, 1151, 1152, 7, 23, 0, 0, 1152, 1153, 7, 10, 0, 0, 1153, 1154, + 7, 21, 0, 0, 1154, 1155, 7, 2, 0, 0, 1155, 1156, 7, 6, 0, 0, 1156, 1157, + 7, 7, 0, 0, 1157, 1158, 7, 4, 0, 0, 1158, 258, 1, 0, 0, 0, 1159, 1160, + 7, 14, 0, 0, 1160, 1161, 7, 10, 0, 0, 1161, 1162, 7, 11, 0, 0, 1162, 1163, + 7, 10, 0, 0, 1163, 1164, 7, 5, 0, 0, 1164, 1165, 7, 4, 0, 0, 1165, 260, + 1, 0, 0, 0, 1166, 1167, 7, 14, 0, 0, 1167, 1168, 7, 10, 0, 0, 1168, 1169, + 7, 4, 0, 0, 1169, 262, 1, 0, 0, 0, 1170, 1171, 7, 4, 0, 0, 1171, 1172, + 7, 0, 0, 0, 1172, 1173, 7, 1, 0, 0, 1173, 1174, 7, 11, 0, 0, 1174, 1175, + 7, 10, 0, 0, 1175, 264, 1, 0, 0, 0, 1176, 1177, 7, 4, 0, 0, 1177, 1178, + 7, 10, 0, 0, 1178, 1179, 7, 17, 0, 0, 1179, 1180, 7, 21, 0, 0, 1180, 266, + 1, 0, 0, 0, 1181, 1182, 7, 4, 0, 0, 1182, 1183, 7, 10, 0, 0, 1183, 1184, + 7, 17, 0, 0, 1184, 1185, 7, 21, 0, 0, 1185, 1186, 7, 2, 0, 0, 1186, 1187, + 7, 3, 0, 0, 1187, 1188, 7, 0, 0, 0, 1188, 1189, 7, 3, 0, 0, 1189, 1190, + 7, 12, 0, 0, 1190, 268, 1, 0, 0, 0, 1191, 1192, 7, 4, 0, 0, 1192, 1193, + 7, 15, 0, 0, 1193, 1194, 7, 10, 0, 0, 1194, 1195, 7, 7, 0, 0, 1195, 270, + 1, 0, 0, 0, 1196, 1197, 7, 4, 0, 0, 1197, 1198, 7, 2, 0, 0, 1198, 272, + 1, 0, 0, 0, 1199, 1200, 7, 4, 0, 0, 1200, 1201, 7, 3, 0, 0, 1201, 1202, + 7, 0, 0, 0, 1202, 1203, 7, 7, 0, 0, 1203, 1204, 7, 14, 0, 0, 1204, 1205, + 7, 0, 0, 0, 1205, 1206, 7, 5, 0, 0, 1206, 1207, 7, 4, 0, 0, 1207, 1208, + 7, 6, 0, 0, 1208, 1209, 7, 2, 0, 0, 1209, 1210, 7, 7, 0, 0, 1210, 274, + 1, 0, 0, 0, 1211, 1212, 7, 4, 0, 0, 1212, 1213, 7, 3, 0, 0, 1213, 1214, + 7, 6, 0, 0, 1214, 1215, 7, 18, 0, 0, 1215, 1216, 7, 18, 0, 0, 1216, 1217, + 7, 10, 0, 0, 1217, 1218, 7, 3, 0, 0, 1218, 276, 1, 0, 0, 0, 1219, 1220, + 7, 16, 0, 0, 1220, 1221, 7, 7, 0, 0, 1221, 1222, 7, 6, 0, 0, 1222, 1223, + 7, 2, 0, 0, 1223, 1224, 7, 7, 0, 0, 1224, 278, 1, 0, 0, 0, 1225, 1226, + 7, 16, 0, 0, 1226, 1227, 7, 7, 0, 0, 1227, 1228, 7, 6, 0, 0, 1228, 1229, + 7, 25, 0, 0, 1229, 1230, 7, 16, 0, 0, 1230, 1231, 7, 10, 0, 0, 1231, 280, + 1, 0, 0, 0, 1232, 1233, 7, 16, 0, 0, 1233, 1234, 7, 21, 0, 0, 1234, 1235, + 7, 8, 0, 0, 1235, 1236, 7, 0, 0, 0, 1236, 1237, 7, 4, 0, 0, 1237, 1238, + 7, 10, 0, 0, 1238, 282, 1, 0, 0, 0, 1239, 1240, 7, 16, 0, 0, 1240, 1241, + 7, 14, 0, 0, 1241, 1242, 7, 6, 0, 0, 1242, 1243, 7, 7, 0, 0, 1243, 1244, + 7, 18, 0, 0, 1244, 284, 1, 0, 0, 0, 1245, 1246, 7, 23, 0, 0, 1246, 1247, + 7, 0, 0, 0, 1247, 1248, 7, 5, 0, 0, 1248, 1249, 7, 16, 0, 0, 1249, 1250, + 7, 16, 0, 0, 1250, 1251, 7, 17, 0, 0, 1251, 286, 1, 0, 0, 0, 1252, 1253, + 7, 23, 0, 0, 1253, 1254, 7, 0, 0, 0, 1254, 1255, 7, 11, 0, 0, 1255, 1256, + 7, 16, 0, 0, 1256, 1257, 7, 10, 0, 0, 1257, 1258, 7, 14, 0, 0, 1258, 288, + 1, 0, 0, 0, 1259, 1260, 7, 23, 0, 0, 1260, 1261, 7, 6, 0, 0, 1261, 1262, + 7, 10, 0, 0, 1262, 1263, 7, 19, 0, 0, 1263, 290, 1, 0, 0, 0, 1264, 1265, + 7, 23, 0, 0, 1265, 1266, 7, 6, 0, 0, 1266, 1267, 7, 3, 0, 0, 1267, 1268, + 7, 4, 0, 0, 1268, 1269, 7, 16, 0, 0, 1269, 1270, 7, 0, 0, 0, 1270, 1271, + 7, 11, 0, 0, 1271, 292, 1, 0, 0, 0, 1272, 1273, 7, 19, 0, 0, 1273, 1274, + 7, 15, 0, 0, 1274, 1275, 7, 10, 0, 0, 1275, 1276, 7, 7, 0, 0, 1276, 294, + 1, 0, 0, 0, 1277, 1278, 7, 19, 0, 0, 1278, 1279, 7, 15, 0, 0, 1279, 1280, + 7, 10, 0, 0, 1280, 1281, 7, 3, 0, 0, 1281, 1282, 7, 10, 0, 0, 1282, 296, + 1, 0, 0, 0, 1283, 1284, 7, 19, 0, 0, 1284, 1285, 7, 6, 0, 0, 1285, 1286, + 7, 4, 0, 0, 1286, 1287, 7, 15, 0, 0, 1287, 298, 1, 0, 0, 0, 1288, 1289, + 7, 19, 0, 
0, 1289, 1290, 7, 6, 0, 0, 1290, 1291, 7, 4, 0, 0, 1291, 1292, + 7, 15, 0, 0, 1292, 1293, 7, 2, 0, 0, 1293, 1294, 7, 16, 0, 0, 1294, 1295, + 7, 4, 0, 0, 1295, 300, 1, 0, 0, 0, 1296, 1297, 7, 9, 0, 0, 1297, 1298, + 7, 6, 0, 0, 1298, 1299, 7, 3, 0, 0, 1299, 1300, 7, 14, 0, 0, 1300, 1301, + 7, 4, 0, 0, 1301, 1302, 5, 95, 0, 0, 1302, 1303, 7, 23, 0, 0, 1303, 1304, + 7, 0, 0, 0, 1304, 1305, 7, 11, 0, 0, 1305, 1306, 7, 16, 0, 0, 1306, 1307, + 7, 10, 0, 0, 1307, 302, 1, 0, 0, 0, 1308, 1309, 7, 2, 0, 0, 1309, 1310, + 7, 23, 0, 0, 1310, 1311, 7, 10, 0, 0, 1311, 1312, 7, 3, 0, 0, 1312, 304, + 1, 0, 0, 0, 1313, 1314, 7, 21, 0, 0, 1314, 1315, 7, 0, 0, 0, 1315, 1316, + 7, 3, 0, 0, 1316, 1317, 7, 4, 0, 0, 1317, 1318, 7, 6, 0, 0, 1318, 1319, + 7, 4, 0, 0, 1319, 1320, 7, 6, 0, 0, 1320, 1321, 7, 2, 0, 0, 1321, 1322, + 7, 7, 0, 0, 1322, 306, 1, 0, 0, 0, 1323, 1324, 7, 3, 0, 0, 1324, 1325, + 7, 0, 0, 0, 1325, 1326, 7, 7, 0, 0, 1326, 1327, 7, 18, 0, 0, 1327, 1328, + 7, 10, 0, 0, 1328, 308, 1, 0, 0, 0, 1329, 1330, 7, 21, 0, 0, 1330, 1331, + 7, 3, 0, 0, 1331, 1332, 7, 10, 0, 0, 1332, 1333, 7, 5, 0, 0, 1333, 1334, + 7, 10, 0, 0, 1334, 1335, 7, 8, 0, 0, 1335, 1336, 7, 6, 0, 0, 1336, 1337, + 7, 7, 0, 0, 1337, 1338, 7, 18, 0, 0, 1338, 310, 1, 0, 0, 0, 1339, 1340, + 7, 16, 0, 0, 1340, 1341, 7, 7, 0, 0, 1341, 1342, 7, 1, 0, 0, 1342, 1343, + 7, 2, 0, 0, 1343, 1344, 7, 16, 0, 0, 1344, 1345, 7, 7, 0, 0, 1345, 1346, + 7, 8, 0, 0, 1346, 1347, 7, 10, 0, 0, 1347, 1348, 7, 8, 0, 0, 1348, 312, + 1, 0, 0, 0, 1349, 1350, 7, 5, 0, 0, 1350, 1351, 7, 16, 0, 0, 1351, 1352, + 7, 3, 0, 0, 1352, 1353, 7, 3, 0, 0, 1353, 1354, 7, 10, 0, 0, 1354, 1355, + 7, 7, 0, 0, 1355, 1356, 7, 4, 0, 0, 1356, 314, 1, 0, 0, 0, 1357, 1358, + 7, 9, 0, 0, 1358, 1359, 7, 2, 0, 0, 1359, 1360, 7, 11, 0, 0, 1360, 1361, + 7, 11, 0, 0, 1361, 1362, 7, 2, 0, 0, 1362, 1363, 7, 19, 0, 0, 1363, 1364, + 7, 6, 0, 0, 1364, 1365, 7, 7, 0, 0, 1365, 1366, 7, 18, 0, 0, 1366, 316, + 1, 0, 0, 0, 1367, 1368, 7, 5, 0, 0, 1368, 1369, 7, 16, 0, 0, 1369, 1370, + 7, 17, 0, 0, 1370, 1371, 7, 10, 0, 0, 1371, 1372, 5, 95, 0, 0, 1372, 1373, + 7, 8, 0, 0, 1373, 1374, 7, 6, 0, 0, 1374, 1375, 7, 14, 0, 0, 1375, 1376, + 7, 4, 0, 0, 1376, 318, 1, 0, 0, 0, 1377, 1378, 7, 8, 0, 0, 1378, 1379, + 7, 10, 0, 0, 1379, 1380, 7, 7, 0, 0, 1380, 1381, 7, 14, 0, 0, 1381, 1382, + 7, 10, 0, 0, 1382, 1383, 5, 95, 0, 0, 1383, 1384, 7, 3, 0, 0, 1384, 1385, + 7, 0, 0, 0, 1385, 1386, 7, 7, 0, 0, 1386, 1387, 7, 20, 0, 0, 1387, 320, + 1, 0, 0, 0, 1388, 1389, 7, 11, 0, 0, 1389, 1390, 7, 0, 0, 0, 1390, 1391, + 7, 18, 0, 0, 1391, 322, 1, 0, 0, 0, 1392, 1393, 7, 11, 0, 0, 1393, 1394, + 7, 0, 0, 0, 1394, 1395, 7, 14, 0, 0, 1395, 1396, 7, 4, 0, 0, 1396, 1397, + 5, 95, 0, 0, 1397, 1398, 7, 23, 0, 0, 1398, 1399, 7, 0, 0, 0, 1399, 1400, + 7, 11, 0, 0, 1400, 1401, 7, 16, 0, 0, 1401, 1402, 7, 10, 0, 0, 1402, 324, + 1, 0, 0, 0, 1403, 1404, 7, 11, 0, 0, 1404, 1405, 7, 10, 0, 0, 1405, 1406, + 7, 0, 0, 0, 1406, 1407, 7, 8, 0, 0, 1407, 326, 1, 0, 0, 0, 1408, 1409, + 7, 7, 0, 0, 1409, 1410, 7, 4, 0, 0, 1410, 1411, 7, 15, 0, 0, 1411, 1412, + 5, 95, 0, 0, 1412, 1413, 7, 23, 0, 0, 1413, 1414, 7, 0, 0, 0, 1414, 1415, + 7, 11, 0, 0, 1415, 1416, 7, 16, 0, 0, 1416, 1417, 7, 10, 0, 0, 1417, 328, + 1, 0, 0, 0, 1418, 1419, 7, 7, 0, 0, 1419, 1420, 7, 4, 0, 0, 1420, 1421, + 7, 6, 0, 0, 1421, 1422, 7, 11, 0, 0, 1422, 1423, 7, 10, 0, 0, 1423, 330, + 1, 0, 0, 0, 1424, 1425, 7, 21, 0, 0, 1425, 1426, 7, 10, 0, 0, 1426, 1427, + 7, 3, 0, 0, 1427, 1428, 7, 5, 0, 0, 1428, 1429, 7, 10, 0, 0, 1429, 1430, + 7, 7, 0, 0, 1430, 1431, 7, 4, 0, 0, 1431, 
1432, 5, 95, 0, 0, 1432, 1433, + 7, 3, 0, 0, 1433, 1434, 7, 0, 0, 0, 1434, 1435, 7, 7, 0, 0, 1435, 1436, + 7, 20, 0, 0, 1436, 332, 1, 0, 0, 0, 1437, 1438, 7, 3, 0, 0, 1438, 1439, + 7, 0, 0, 0, 1439, 1440, 7, 7, 0, 0, 1440, 1441, 7, 20, 0, 0, 1441, 334, + 1, 0, 0, 0, 1442, 1443, 7, 3, 0, 0, 1443, 1444, 7, 2, 0, 0, 1444, 1445, + 7, 19, 0, 0, 1445, 1446, 5, 95, 0, 0, 1446, 1447, 7, 7, 0, 0, 1447, 1448, + 7, 16, 0, 0, 1448, 1449, 7, 17, 0, 0, 1449, 1450, 7, 1, 0, 0, 1450, 1451, + 7, 10, 0, 0, 1451, 1452, 7, 3, 0, 0, 1452, 336, 1, 0, 0, 0, 1453, 1454, + 7, 18, 0, 0, 1454, 1455, 7, 10, 0, 0, 1455, 1456, 7, 7, 0, 0, 1456, 1457, + 7, 10, 0, 0, 1457, 1458, 7, 3, 0, 0, 1458, 1459, 7, 0, 0, 0, 1459, 1460, + 7, 4, 0, 0, 1460, 1461, 7, 10, 0, 0, 1461, 1462, 7, 8, 0, 0, 1462, 338, + 1, 0, 0, 0, 1463, 1464, 7, 0, 0, 0, 1464, 1465, 7, 11, 0, 0, 1465, 1466, + 7, 19, 0, 0, 1466, 1467, 7, 0, 0, 0, 1467, 1468, 7, 12, 0, 0, 1468, 1469, + 7, 14, 0, 0, 1469, 340, 1, 0, 0, 0, 1470, 1471, 7, 14, 0, 0, 1471, 1472, + 7, 4, 0, 0, 1472, 1473, 7, 2, 0, 0, 1473, 1474, 7, 3, 0, 0, 1474, 1475, + 7, 10, 0, 0, 1475, 1476, 7, 8, 0, 0, 1476, 342, 1, 0, 0, 0, 1477, 1478, + 7, 4, 0, 0, 1478, 1479, 7, 3, 0, 0, 1479, 1480, 7, 16, 0, 0, 1480, 1481, + 7, 10, 0, 0, 1481, 344, 1, 0, 0, 0, 1482, 1483, 7, 9, 0, 0, 1483, 1484, + 7, 0, 0, 0, 1484, 1485, 7, 11, 0, 0, 1485, 1486, 7, 14, 0, 0, 1486, 1487, + 7, 10, 0, 0, 1487, 346, 1, 0, 0, 0, 1488, 1489, 7, 19, 0, 0, 1489, 1490, + 7, 6, 0, 0, 1490, 1491, 7, 7, 0, 0, 1491, 1492, 7, 8, 0, 0, 1492, 1493, + 7, 2, 0, 0, 1493, 1494, 7, 19, 0, 0, 1494, 348, 1, 0, 0, 0, 1495, 1496, + 7, 7, 0, 0, 1496, 1497, 7, 16, 0, 0, 1497, 1498, 7, 11, 0, 0, 1498, 1499, + 7, 11, 0, 0, 1499, 1500, 7, 14, 0, 0, 1500, 350, 1, 0, 0, 0, 1501, 1502, + 7, 9, 0, 0, 1502, 1503, 7, 6, 0, 0, 1503, 1504, 7, 3, 0, 0, 1504, 1505, + 7, 14, 0, 0, 1505, 1506, 7, 4, 0, 0, 1506, 352, 1, 0, 0, 0, 1507, 1508, + 7, 11, 0, 0, 1508, 1509, 7, 0, 0, 0, 1509, 1510, 7, 14, 0, 0, 1510, 1511, + 7, 4, 0, 0, 1511, 354, 1, 0, 0, 0, 1512, 1513, 7, 9, 0, 0, 1513, 1514, + 7, 6, 0, 0, 1514, 1515, 7, 11, 0, 0, 1515, 1516, 7, 4, 0, 0, 1516, 1517, + 7, 10, 0, 0, 1517, 1518, 7, 3, 0, 0, 1518, 356, 1, 0, 0, 0, 1519, 1520, + 7, 18, 0, 0, 1520, 1521, 7, 3, 0, 0, 1521, 1522, 7, 2, 0, 0, 1522, 1523, + 7, 16, 0, 0, 1523, 1524, 7, 21, 0, 0, 1524, 1525, 7, 14, 0, 0, 1525, 358, + 1, 0, 0, 0, 1526, 1527, 7, 10, 0, 0, 1527, 1528, 7, 22, 0, 0, 1528, 1529, + 7, 5, 0, 0, 1529, 1530, 7, 11, 0, 0, 1530, 1531, 7, 16, 0, 0, 1531, 1532, + 7, 8, 0, 0, 1532, 1533, 7, 10, 0, 0, 1533, 360, 1, 0, 0, 0, 1534, 1535, + 7, 4, 0, 0, 1535, 1536, 7, 6, 0, 0, 1536, 1537, 7, 10, 0, 0, 1537, 1538, + 7, 14, 0, 0, 1538, 362, 1, 0, 0, 0, 1539, 1540, 7, 2, 0, 0, 1540, 1541, + 7, 4, 0, 0, 1541, 1542, 7, 15, 0, 0, 1542, 1543, 7, 10, 0, 0, 1543, 1544, + 7, 3, 0, 0, 1544, 1545, 7, 14, 0, 0, 1545, 364, 1, 0, 0, 0, 1546, 1547, + 7, 8, 0, 0, 1547, 1548, 7, 2, 0, 0, 1548, 366, 1, 0, 0, 0, 1549, 1550, + 7, 7, 0, 0, 1550, 1551, 7, 2, 0, 0, 1551, 1552, 7, 4, 0, 0, 1552, 1553, + 7, 15, 0, 0, 1553, 1554, 7, 6, 0, 0, 1554, 1555, 7, 7, 0, 0, 1555, 1556, + 7, 18, 0, 0, 1556, 368, 1, 0, 0, 0, 1557, 1563, 5, 34, 0, 0, 1558, 1562, + 8, 26, 0, 0, 1559, 1560, 5, 34, 0, 0, 1560, 1562, 5, 34, 0, 0, 1561, 1558, + 1, 0, 0, 0, 1561, 1559, 1, 0, 0, 0, 1562, 1565, 1, 0, 0, 0, 1563, 1561, + 1, 0, 0, 0, 1563, 1564, 1, 0, 0, 0, 1564, 1566, 1, 0, 0, 0, 1565, 1563, + 1, 0, 0, 0, 1566, 1593, 5, 34, 0, 0, 1567, 1573, 5, 96, 0, 0, 1568, 1572, + 8, 27, 0, 0, 1569, 1570, 5, 96, 0, 0, 1570, 1572, 5, 96, 0, 0, 1571, 1568, + 1, 
0, 0, 0, 1571, 1569, 1, 0, 0, 0, 1572, 1575, 1, 0, 0, 0, 1573, 1571, + 1, 0, 0, 0, 1573, 1574, 1, 0, 0, 0, 1574, 1576, 1, 0, 0, 0, 1575, 1573, + 1, 0, 0, 0, 1576, 1593, 5, 96, 0, 0, 1577, 1581, 5, 91, 0, 0, 1578, 1580, + 8, 28, 0, 0, 1579, 1578, 1, 0, 0, 0, 1580, 1583, 1, 0, 0, 0, 1581, 1579, + 1, 0, 0, 0, 1581, 1582, 1, 0, 0, 0, 1582, 1584, 1, 0, 0, 0, 1583, 1581, + 1, 0, 0, 0, 1584, 1593, 5, 93, 0, 0, 1585, 1589, 7, 29, 0, 0, 1586, 1588, + 7, 30, 0, 0, 1587, 1586, 1, 0, 0, 0, 1588, 1591, 1, 0, 0, 0, 1589, 1587, + 1, 0, 0, 0, 1589, 1590, 1, 0, 0, 0, 1590, 1593, 1, 0, 0, 0, 1591, 1589, + 1, 0, 0, 0, 1592, 1557, 1, 0, 0, 0, 1592, 1567, 1, 0, 0, 0, 1592, 1577, + 1, 0, 0, 0, 1592, 1585, 1, 0, 0, 0, 1593, 370, 1, 0, 0, 0, 1594, 1596, + 3, 389, 194, 0, 1595, 1594, 1, 0, 0, 0, 1596, 1597, 1, 0, 0, 0, 1597, 1595, + 1, 0, 0, 0, 1597, 1598, 1, 0, 0, 0, 1598, 1606, 1, 0, 0, 0, 1599, 1603, + 5, 46, 0, 0, 1600, 1602, 3, 389, 194, 0, 1601, 1600, 1, 0, 0, 0, 1602, + 1605, 1, 0, 0, 0, 1603, 1601, 1, 0, 0, 0, 1603, 1604, 1, 0, 0, 0, 1604, + 1607, 1, 0, 0, 0, 1605, 1603, 1, 0, 0, 0, 1606, 1599, 1, 0, 0, 0, 1606, + 1607, 1, 0, 0, 0, 1607, 1615, 1, 0, 0, 0, 1608, 1610, 5, 46, 0, 0, 1609, + 1611, 3, 389, 194, 0, 1610, 1609, 1, 0, 0, 0, 1611, 1612, 1, 0, 0, 0, 1612, + 1610, 1, 0, 0, 0, 1612, 1613, 1, 0, 0, 0, 1613, 1615, 1, 0, 0, 0, 1614, + 1595, 1, 0, 0, 0, 1614, 1608, 1, 0, 0, 0, 1615, 1625, 1, 0, 0, 0, 1616, + 1618, 7, 10, 0, 0, 1617, 1619, 7, 31, 0, 0, 1618, 1617, 1, 0, 0, 0, 1618, + 1619, 1, 0, 0, 0, 1619, 1621, 1, 0, 0, 0, 1620, 1622, 3, 389, 194, 0, 1621, + 1620, 1, 0, 0, 0, 1622, 1623, 1, 0, 0, 0, 1623, 1621, 1, 0, 0, 0, 1623, + 1624, 1, 0, 0, 0, 1624, 1626, 1, 0, 0, 0, 1625, 1616, 1, 0, 0, 0, 1625, + 1626, 1, 0, 0, 0, 1626, 1636, 1, 0, 0, 0, 1627, 1628, 5, 48, 0, 0, 1628, + 1629, 7, 22, 0, 0, 1629, 1631, 1, 0, 0, 0, 1630, 1632, 3, 387, 193, 0, + 1631, 1630, 1, 0, 0, 0, 1632, 1633, 1, 0, 0, 0, 1633, 1631, 1, 0, 0, 0, + 1633, 1634, 1, 0, 0, 0, 1634, 1636, 1, 0, 0, 0, 1635, 1614, 1, 0, 0, 0, + 1635, 1627, 1, 0, 0, 0, 1636, 372, 1, 0, 0, 0, 1637, 1641, 5, 63, 0, 0, + 1638, 1640, 3, 389, 194, 0, 1639, 1638, 1, 0, 0, 0, 1640, 1643, 1, 0, 0, + 0, 1641, 1639, 1, 0, 0, 0, 1641, 1642, 1, 0, 0, 0, 1642, 1647, 1, 0, 0, + 0, 1643, 1641, 1, 0, 0, 0, 1644, 1645, 7, 32, 0, 0, 1645, 1647, 3, 369, + 184, 0, 1646, 1637, 1, 0, 0, 0, 1646, 1644, 1, 0, 0, 0, 1647, 374, 1, 0, + 0, 0, 1648, 1654, 5, 39, 0, 0, 1649, 1653, 8, 33, 0, 0, 1650, 1651, 5, + 39, 0, 0, 1651, 1653, 5, 39, 0, 0, 1652, 1649, 1, 0, 0, 0, 1652, 1650, + 1, 0, 0, 0, 1653, 1656, 1, 0, 0, 0, 1654, 1652, 1, 0, 0, 0, 1654, 1655, + 1, 0, 0, 0, 1655, 1657, 1, 0, 0, 0, 1656, 1654, 1, 0, 0, 0, 1657, 1658, + 5, 39, 0, 0, 1658, 376, 1, 0, 0, 0, 1659, 1660, 7, 22, 0, 0, 1660, 1661, + 3, 375, 187, 0, 1661, 378, 1, 0, 0, 0, 1662, 1663, 5, 45, 0, 0, 1663, 1664, + 5, 45, 0, 0, 1664, 1668, 1, 0, 0, 0, 1665, 1667, 8, 34, 0, 0, 1666, 1665, + 1, 0, 0, 0, 1667, 1670, 1, 0, 0, 0, 1668, 1666, 1, 0, 0, 0, 1668, 1669, + 1, 0, 0, 0, 1669, 1676, 1, 0, 0, 0, 1670, 1668, 1, 0, 0, 0, 1671, 1673, + 5, 13, 0, 0, 1672, 1671, 1, 0, 0, 0, 1672, 1673, 1, 0, 0, 0, 1673, 1674, + 1, 0, 0, 0, 1674, 1677, 5, 10, 0, 0, 1675, 1677, 5, 0, 0, 1, 1676, 1672, + 1, 0, 0, 0, 1676, 1675, 1, 0, 0, 0, 1677, 1678, 1, 0, 0, 0, 1678, 1679, + 6, 189, 0, 0, 1679, 380, 1, 0, 0, 0, 1680, 1681, 5, 47, 0, 0, 1681, 1682, + 5, 42, 0, 0, 1682, 1686, 1, 0, 0, 0, 1683, 1685, 9, 0, 0, 0, 1684, 1683, + 1, 0, 0, 0, 1685, 1688, 1, 0, 0, 0, 1686, 1687, 1, 0, 0, 0, 1686, 1684, + 1, 0, 0, 0, 1687, 1689, 1, 0, 0, 0, 1688, 1686, 
1, 0, 0, 0, 1689, 1690, + 5, 42, 0, 0, 1690, 1691, 5, 47, 0, 0, 1691, 1692, 1, 0, 0, 0, 1692, 1693, + 6, 190, 0, 0, 1693, 382, 1, 0, 0, 0, 1694, 1695, 7, 35, 0, 0, 1695, 1696, + 1, 0, 0, 0, 1696, 1697, 6, 191, 0, 0, 1697, 384, 1, 0, 0, 0, 1698, 1699, + 9, 0, 0, 0, 1699, 386, 1, 0, 0, 0, 1700, 1701, 7, 36, 0, 0, 1701, 388, + 1, 0, 0, 0, 1702, 1703, 7, 37, 0, 0, 1703, 390, 1, 0, 0, 0, 26, 0, 1561, + 1563, 1571, 1573, 1581, 1589, 1592, 1597, 1603, 1606, 1612, 1614, 1618, + 1623, 1625, 1633, 1635, 1641, 1646, 1652, 1654, 1668, 1672, 1676, 1686, + 1, 0, 1, 0, + } + deserializer := antlr.NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(state, index) + } +} + +// SQLiteLexerInit initializes any static state used to implement SQLiteLexer. By default the +// static state used to implement the lexer is lazily initialized during the first call to +// NewSQLiteLexer(). You can call this function if you wish to initialize the static state ahead +// of time. +func SQLiteLexerInit() { + staticData := &sqlitelexerLexerStaticData + staticData.once.Do(sqlitelexerLexerInit) +} + +// NewSQLiteLexer produces a new lexer instance for the optional input antlr.CharStream. +func NewSQLiteLexer(input antlr.CharStream) *SQLiteLexer { + SQLiteLexerInit() + l := new(SQLiteLexer) + l.BaseLexer = antlr.NewBaseLexer(input) + staticData := &sqlitelexerLexerStaticData + l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) + l.channelNames = staticData.channelNames + l.modeNames = staticData.modeNames + l.RuleNames = staticData.ruleNames + l.LiteralNames = staticData.literalNames + l.SymbolicNames = staticData.symbolicNames + l.GrammarFileName = "SQLiteLexer.g4" + // TODO: l.EOF = antlr.TokenEOF + + return l +} + +// SQLiteLexer tokens. 
+const ( + SQLiteLexerSCOL = 1 + SQLiteLexerDOT = 2 + SQLiteLexerOPEN_PAR = 3 + SQLiteLexerCLOSE_PAR = 4 + SQLiteLexerCOMMA = 5 + SQLiteLexerASSIGN = 6 + SQLiteLexerSTAR = 7 + SQLiteLexerPLUS = 8 + SQLiteLexerMINUS = 9 + SQLiteLexerTILDE = 10 + SQLiteLexerPIPE2 = 11 + SQLiteLexerDIV = 12 + SQLiteLexerMOD = 13 + SQLiteLexerLT2 = 14 + SQLiteLexerGT2 = 15 + SQLiteLexerAMP = 16 + SQLiteLexerPIPE = 17 + SQLiteLexerLT = 18 + SQLiteLexerLT_EQ = 19 + SQLiteLexerGT = 20 + SQLiteLexerGT_EQ = 21 + SQLiteLexerEQ = 22 + SQLiteLexerNOT_EQ1 = 23 + SQLiteLexerNOT_EQ2 = 24 + SQLiteLexerABORT_ = 25 + SQLiteLexerACTION_ = 26 + SQLiteLexerADD_ = 27 + SQLiteLexerAFTER_ = 28 + SQLiteLexerALL_ = 29 + SQLiteLexerALTER_ = 30 + SQLiteLexerANALYZE_ = 31 + SQLiteLexerAND_ = 32 + SQLiteLexerAS_ = 33 + SQLiteLexerASC_ = 34 + SQLiteLexerATTACH_ = 35 + SQLiteLexerAUTOINCREMENT_ = 36 + SQLiteLexerBEFORE_ = 37 + SQLiteLexerBEGIN_ = 38 + SQLiteLexerBETWEEN_ = 39 + SQLiteLexerBY_ = 40 + SQLiteLexerCASCADE_ = 41 + SQLiteLexerCASE_ = 42 + SQLiteLexerCAST_ = 43 + SQLiteLexerCHECK_ = 44 + SQLiteLexerCOLLATE_ = 45 + SQLiteLexerCOLUMN_ = 46 + SQLiteLexerCOMMIT_ = 47 + SQLiteLexerCONFLICT_ = 48 + SQLiteLexerCONSTRAINT_ = 49 + SQLiteLexerCREATE_ = 50 + SQLiteLexerCROSS_ = 51 + SQLiteLexerCURRENT_DATE_ = 52 + SQLiteLexerCURRENT_TIME_ = 53 + SQLiteLexerCURRENT_TIMESTAMP_ = 54 + SQLiteLexerDATABASE_ = 55 + SQLiteLexerDEFAULT_ = 56 + SQLiteLexerDEFERRABLE_ = 57 + SQLiteLexerDEFERRED_ = 58 + SQLiteLexerDELETE_ = 59 + SQLiteLexerDESC_ = 60 + SQLiteLexerDETACH_ = 61 + SQLiteLexerDISTINCT_ = 62 + SQLiteLexerDROP_ = 63 + SQLiteLexerEACH_ = 64 + SQLiteLexerELSE_ = 65 + SQLiteLexerEND_ = 66 + SQLiteLexerESCAPE_ = 67 + SQLiteLexerEXCEPT_ = 68 + SQLiteLexerEXCLUSIVE_ = 69 + SQLiteLexerEXISTS_ = 70 + SQLiteLexerEXPLAIN_ = 71 + SQLiteLexerFAIL_ = 72 + SQLiteLexerFOR_ = 73 + SQLiteLexerFOREIGN_ = 74 + SQLiteLexerFROM_ = 75 + SQLiteLexerFULL_ = 76 + SQLiteLexerGLOB_ = 77 + SQLiteLexerGROUP_ = 78 + SQLiteLexerHAVING_ = 79 + SQLiteLexerIF_ = 80 + SQLiteLexerIGNORE_ = 81 + SQLiteLexerIMMEDIATE_ = 82 + SQLiteLexerIN_ = 83 + SQLiteLexerINDEX_ = 84 + SQLiteLexerINDEXED_ = 85 + SQLiteLexerINITIALLY_ = 86 + SQLiteLexerINNER_ = 87 + SQLiteLexerINSERT_ = 88 + SQLiteLexerINSTEAD_ = 89 + SQLiteLexerINTERSECT_ = 90 + SQLiteLexerINTO_ = 91 + SQLiteLexerIS_ = 92 + SQLiteLexerISNULL_ = 93 + SQLiteLexerJOIN_ = 94 + SQLiteLexerKEY_ = 95 + SQLiteLexerLEFT_ = 96 + SQLiteLexerLIKE_ = 97 + SQLiteLexerLIMIT_ = 98 + SQLiteLexerMATCH_ = 99 + SQLiteLexerNATURAL_ = 100 + SQLiteLexerNO_ = 101 + SQLiteLexerNOT_ = 102 + SQLiteLexerNOTNULL_ = 103 + SQLiteLexerNULL_ = 104 + SQLiteLexerOF_ = 105 + SQLiteLexerOFFSET_ = 106 + SQLiteLexerON_ = 107 + SQLiteLexerOR_ = 108 + SQLiteLexerORDER_ = 109 + SQLiteLexerOUTER_ = 110 + SQLiteLexerPLAN_ = 111 + SQLiteLexerPRAGMA_ = 112 + SQLiteLexerPRIMARY_ = 113 + SQLiteLexerQUERY_ = 114 + SQLiteLexerRAISE_ = 115 + SQLiteLexerRECURSIVE_ = 116 + SQLiteLexerREFERENCES_ = 117 + SQLiteLexerREGEXP_ = 118 + SQLiteLexerREINDEX_ = 119 + SQLiteLexerRELEASE_ = 120 + SQLiteLexerRENAME_ = 121 + SQLiteLexerREPLACE_ = 122 + SQLiteLexerRESTRICT_ = 123 + SQLiteLexerRETURNING_ = 124 + SQLiteLexerRIGHT_ = 125 + SQLiteLexerROLLBACK_ = 126 + SQLiteLexerROW_ = 127 + SQLiteLexerROWS_ = 128 + SQLiteLexerSAVEPOINT_ = 129 + SQLiteLexerSELECT_ = 130 + SQLiteLexerSET_ = 131 + SQLiteLexerTABLE_ = 132 + SQLiteLexerTEMP_ = 133 + SQLiteLexerTEMPORARY_ = 134 + SQLiteLexerTHEN_ = 135 + SQLiteLexerTO_ = 136 + SQLiteLexerTRANSACTION_ = 137 + SQLiteLexerTRIGGER_ = 138 + 
SQLiteLexerUNION_ = 139 + SQLiteLexerUNIQUE_ = 140 + SQLiteLexerUPDATE_ = 141 + SQLiteLexerUSING_ = 142 + SQLiteLexerVACUUM_ = 143 + SQLiteLexerVALUES_ = 144 + SQLiteLexerVIEW_ = 145 + SQLiteLexerVIRTUAL_ = 146 + SQLiteLexerWHEN_ = 147 + SQLiteLexerWHERE_ = 148 + SQLiteLexerWITH_ = 149 + SQLiteLexerWITHOUT_ = 150 + SQLiteLexerFIRST_VALUE_ = 151 + SQLiteLexerOVER_ = 152 + SQLiteLexerPARTITION_ = 153 + SQLiteLexerRANGE_ = 154 + SQLiteLexerPRECEDING_ = 155 + SQLiteLexerUNBOUNDED_ = 156 + SQLiteLexerCURRENT_ = 157 + SQLiteLexerFOLLOWING_ = 158 + SQLiteLexerCUME_DIST_ = 159 + SQLiteLexerDENSE_RANK_ = 160 + SQLiteLexerLAG_ = 161 + SQLiteLexerLAST_VALUE_ = 162 + SQLiteLexerLEAD_ = 163 + SQLiteLexerNTH_VALUE_ = 164 + SQLiteLexerNTILE_ = 165 + SQLiteLexerPERCENT_RANK_ = 166 + SQLiteLexerRANK_ = 167 + SQLiteLexerROW_NUMBER_ = 168 + SQLiteLexerGENERATED_ = 169 + SQLiteLexerALWAYS_ = 170 + SQLiteLexerSTORED_ = 171 + SQLiteLexerTRUE_ = 172 + SQLiteLexerFALSE_ = 173 + SQLiteLexerWINDOW_ = 174 + SQLiteLexerNULLS_ = 175 + SQLiteLexerFIRST_ = 176 + SQLiteLexerLAST_ = 177 + SQLiteLexerFILTER_ = 178 + SQLiteLexerGROUPS_ = 179 + SQLiteLexerEXCLUDE_ = 180 + SQLiteLexerTIES_ = 181 + SQLiteLexerOTHERS_ = 182 + SQLiteLexerDO_ = 183 + SQLiteLexerNOTHING_ = 184 + SQLiteLexerIDENTIFIER = 185 + SQLiteLexerNUMERIC_LITERAL = 186 + SQLiteLexerBIND_PARAMETER = 187 + SQLiteLexerSTRING_LITERAL = 188 + SQLiteLexerBLOB_LITERAL = 189 + SQLiteLexerSINGLE_LINE_COMMENT = 190 + SQLiteLexerMULTILINE_COMMENT = 191 + SQLiteLexerSPACES = 192 + SQLiteLexerUNEXPECTED_CHAR = 193 +) diff --git a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqlite_parser.go b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqlite_parser.go index aec55f79fb..6e634aa288 100644 --- a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqlite_parser.go +++ b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqlite_parser.go @@ -1,28739 +1,28739 @@ -// Code generated from SQLiteParser.g4 by ANTLR 4.12.0. DO NOT EDIT. 
- -package sqliteparser // SQLiteParser -import ( - "fmt" - "strconv" - "sync" - - "github.com/antlr/antlr4/runtime/Go/antlr/v4" -) - -// Suppress unused import errors -var _ = fmt.Printf -var _ = strconv.Itoa -var _ = sync.Once{} - -type SQLiteParser struct { - *antlr.BaseParser -} - -var sqliteparserParserStaticData struct { - once sync.Once - serializedATN []int32 - literalNames []string - symbolicNames []string - ruleNames []string - predictionContextCache *antlr.PredictionContextCache - atn *antlr.ATN - decisionToDFA []*antlr.DFA -} - -func sqliteparserParserInit() { - staticData := &sqliteparserParserStaticData - staticData.literalNames = []string{ - "", "';'", "'.'", "'('", "')'", "','", "'='", "'*'", "'+'", "'-'", "'~'", - "'||'", "'/'", "'%'", "'<<'", "'>>'", "'&'", "'|'", "'<'", "'<='", "'>'", - "'>='", "'=='", "'!='", "'<>'", "'ABORT'", "'ACTION'", "'ADD'", "'AFTER'", - "'ALL'", "'ALTER'", "'ANALYZE'", "'AND'", "'AS'", "'ASC'", "'ATTACH'", - "'AUTOINCREMENT'", "'BEFORE'", "'BEGIN'", "'BETWEEN'", "'BY'", "'CASCADE'", - "'CASE'", "'CAST'", "'CHECK'", "'COLLATE'", "'COLUMN'", "'COMMIT'", - "'CONFLICT'", "'CONSTRAINT'", "'CREATE'", "'CROSS'", "'CURRENT_DATE'", - "'CURRENT_TIME'", "'CURRENT_TIMESTAMP'", "'DATABASE'", "'DEFAULT'", - "'DEFERRABLE'", "'DEFERRED'", "'DELETE'", "'DESC'", "'DETACH'", "'DISTINCT'", - "'DROP'", "'EACH'", "'ELSE'", "'END'", "'ESCAPE'", "'EXCEPT'", "'EXCLUSIVE'", - "'EXISTS'", "'EXPLAIN'", "'FAIL'", "'FOR'", "'FOREIGN'", "'FROM'", "'FULL'", - "'GLOB'", "'GROUP'", "'HAVING'", "'IF'", "'IGNORE'", "'IMMEDIATE'", - "'IN'", "'INDEX'", "'INDEXED'", "'INITIALLY'", "'INNER'", "'INSERT'", - "'INSTEAD'", "'INTERSECT'", "'INTO'", "'IS'", "'ISNULL'", "'JOIN'", - "'KEY'", "'LEFT'", "'LIKE'", "'LIMIT'", "'MATCH'", "'NATURAL'", "'NO'", - "'NOT'", "'NOTNULL'", "'NULL'", "'OF'", "'OFFSET'", "'ON'", "'OR'", - "'ORDER'", "'OUTER'", "'PLAN'", "'PRAGMA'", "'PRIMARY'", "'QUERY'", - "'RAISE'", "'RECURSIVE'", "'REFERENCES'", "'REGEXP'", "'REINDEX'", "'RELEASE'", - "'RENAME'", "'REPLACE'", "'RESTRICT'", "'RETURNING'", "'RIGHT'", "'ROLLBACK'", - "'ROW'", "'ROWS'", "'SAVEPOINT'", "'SELECT'", "'SET'", "'TABLE'", "'TEMP'", - "'TEMPORARY'", "'THEN'", "'TO'", "'TRANSACTION'", "'TRIGGER'", "'UNION'", - "'UNIQUE'", "'UPDATE'", "'USING'", "'VACUUM'", "'VALUES'", "'VIEW'", - "'VIRTUAL'", "'WHEN'", "'WHERE'", "'WITH'", "'WITHOUT'", "'FIRST_VALUE'", - "'OVER'", "'PARTITION'", "'RANGE'", "'PRECEDING'", "'UNBOUNDED'", "'CURRENT'", - "'FOLLOWING'", "'CUME_DIST'", "'DENSE_RANK'", "'LAG'", "'LAST_VALUE'", - "'LEAD'", "'NTH_VALUE'", "'NTILE'", "'PERCENT_RANK'", "'RANK'", "'ROW_NUMBER'", - "'GENERATED'", "'ALWAYS'", "'STORED'", "'TRUE'", "'FALSE'", "'WINDOW'", - "'NULLS'", "'FIRST'", "'LAST'", "'FILTER'", "'GROUPS'", "'EXCLUDE'", - "'TIES'", "'OTHERS'", "'DO'", "'NOTHING'", - } - staticData.symbolicNames = []string{ - "", "SCOL", "DOT", "OPEN_PAR", "CLOSE_PAR", "COMMA", "ASSIGN", "STAR", - "PLUS", "MINUS", "TILDE", "PIPE2", "DIV", "MOD", "LT2", "GT2", "AMP", - "PIPE", "LT", "LT_EQ", "GT", "GT_EQ", "EQ", "NOT_EQ1", "NOT_EQ2", "ABORT_", - "ACTION_", "ADD_", "AFTER_", "ALL_", "ALTER_", "ANALYZE_", "AND_", "AS_", - "ASC_", "ATTACH_", "AUTOINCREMENT_", "BEFORE_", "BEGIN_", "BETWEEN_", - "BY_", "CASCADE_", "CASE_", "CAST_", "CHECK_", "COLLATE_", "COLUMN_", - "COMMIT_", "CONFLICT_", "CONSTRAINT_", "CREATE_", "CROSS_", "CURRENT_DATE_", - "CURRENT_TIME_", "CURRENT_TIMESTAMP_", "DATABASE_", "DEFAULT_", "DEFERRABLE_", - "DEFERRED_", "DELETE_", "DESC_", "DETACH_", "DISTINCT_", "DROP_", "EACH_", - "ELSE_", "END_", 
"ESCAPE_", "EXCEPT_", "EXCLUSIVE_", "EXISTS_", "EXPLAIN_", - "FAIL_", "FOR_", "FOREIGN_", "FROM_", "FULL_", "GLOB_", "GROUP_", "HAVING_", - "IF_", "IGNORE_", "IMMEDIATE_", "IN_", "INDEX_", "INDEXED_", "INITIALLY_", - "INNER_", "INSERT_", "INSTEAD_", "INTERSECT_", "INTO_", "IS_", "ISNULL_", - "JOIN_", "KEY_", "LEFT_", "LIKE_", "LIMIT_", "MATCH_", "NATURAL_", "NO_", - "NOT_", "NOTNULL_", "NULL_", "OF_", "OFFSET_", "ON_", "OR_", "ORDER_", - "OUTER_", "PLAN_", "PRAGMA_", "PRIMARY_", "QUERY_", "RAISE_", "RECURSIVE_", - "REFERENCES_", "REGEXP_", "REINDEX_", "RELEASE_", "RENAME_", "REPLACE_", - "RESTRICT_", "RETURNING_", "RIGHT_", "ROLLBACK_", "ROW_", "ROWS_", "SAVEPOINT_", - "SELECT_", "SET_", "TABLE_", "TEMP_", "TEMPORARY_", "THEN_", "TO_", - "TRANSACTION_", "TRIGGER_", "UNION_", "UNIQUE_", "UPDATE_", "USING_", - "VACUUM_", "VALUES_", "VIEW_", "VIRTUAL_", "WHEN_", "WHERE_", "WITH_", - "WITHOUT_", "FIRST_VALUE_", "OVER_", "PARTITION_", "RANGE_", "PRECEDING_", - "UNBOUNDED_", "CURRENT_", "FOLLOWING_", "CUME_DIST_", "DENSE_RANK_", - "LAG_", "LAST_VALUE_", "LEAD_", "NTH_VALUE_", "NTILE_", "PERCENT_RANK_", - "RANK_", "ROW_NUMBER_", "GENERATED_", "ALWAYS_", "STORED_", "TRUE_", - "FALSE_", "WINDOW_", "NULLS_", "FIRST_", "LAST_", "FILTER_", "GROUPS_", - "EXCLUDE_", "TIES_", "OTHERS_", "DO_", "NOTHING_", "IDENTIFIER", "NUMERIC_LITERAL", - "BIND_PARAMETER", "STRING_LITERAL", "BLOB_LITERAL", "SINGLE_LINE_COMMENT", - "MULTILINE_COMMENT", "SPACES", "UNEXPECTED_CHAR", - } - staticData.ruleNames = []string{ - "parse", "sql_stmt_list", "sql_stmt", "alter_table_stmt", "analyze_stmt", - "attach_stmt", "begin_stmt", "commit_stmt", "rollback_stmt", "savepoint_stmt", - "release_stmt", "create_index_stmt", "indexed_column", "create_table_stmt", - "column_def", "type_name", "column_constraint", "signed_number", "table_constraint", - "foreign_key_clause", "conflict_clause", "create_trigger_stmt", "create_view_stmt", - "create_virtual_table_stmt", "with_clause", "cte_table_name", "recursive_cte", - "common_table_expression", "delete_stmt", "delete_stmt_limited", "detach_stmt", - "drop_stmt", "expr", "raise_function", "literal_value", "value_row", - "values_clause", "insert_stmt", "returning_clause", "upsert_clause", - "pragma_stmt", "pragma_value", "reindex_stmt", "select_stmt", "join_clause", - "select_core", "factored_select_stmt", "simple_select_stmt", "compound_select_stmt", - "table_or_subquery", "result_column", "join_operator", "join_constraint", - "compound_operator", "update_stmt", "column_name_list", "update_stmt_limited", - "qualified_table_name", "vacuum_stmt", "filter_clause", "window_defn", - "over_clause", "frame_spec", "frame_clause", "simple_function_invocation", - "aggregate_function_invocation", "window_function_invocation", "common_table_stmt", - "order_by_stmt", "limit_stmt", "ordering_term", "asc_desc", "frame_left", - "frame_right", "frame_single", "window_function", "offset", "default_value", - "partition_by", "order_by_expr", "order_by_expr_asc_desc", "expr_asc_desc", - "initial_select", "recursive_select", "unary_operator", "error_message", - "module_argument", "column_alias", "keyword", "name", "function_name", - "schema_name", "table_name", "table_or_index_name", "column_name", "collation_name", - "foreign_table", "index_name", "trigger_name", "view_name", "module_name", - "pragma_name", "savepoint_name", "table_alias", "transaction_name", - "window_name", "alias", "filename", "base_window_name", "simple_func", - "aggregate_func", "table_function_name", "any_name", - } - 
staticData.predictionContextCache = antlr.NewPredictionContextCache() - staticData.serializedATN = []int32{ - 4, 1, 193, 2056, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, - 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, - 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, - 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, - 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, - 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, - 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, - 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, - 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, - 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, - 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, - 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, - 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, - 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, - 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, - 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, - 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, - 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, - 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, - 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, - 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, - 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 1, - 0, 5, 0, 228, 8, 0, 10, 0, 12, 0, 231, 9, 0, 1, 0, 1, 0, 1, 1, 5, 1, 236, - 8, 1, 10, 1, 12, 1, 239, 9, 1, 1, 1, 1, 1, 4, 1, 243, 8, 1, 11, 1, 12, - 1, 244, 1, 1, 5, 1, 248, 8, 1, 10, 1, 12, 1, 251, 9, 1, 1, 1, 5, 1, 254, - 8, 1, 10, 1, 12, 1, 257, 9, 1, 1, 2, 1, 2, 1, 2, 3, 2, 262, 8, 2, 3, 2, - 264, 8, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, - 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, - 1, 2, 1, 2, 3, 2, 290, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 297, 8, - 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 304, 8, 3, 1, 3, 1, 3, 1, 3, 1, - 3, 3, 3, 310, 8, 3, 1, 3, 1, 3, 3, 3, 314, 8, 3, 1, 3, 1, 3, 1, 3, 3, 3, - 319, 8, 3, 1, 3, 3, 3, 322, 8, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 3, 4, 329, - 8, 4, 1, 4, 3, 4, 332, 8, 4, 1, 5, 1, 5, 3, 5, 336, 8, 5, 1, 5, 1, 5, 1, - 5, 1, 5, 1, 6, 1, 6, 3, 6, 344, 8, 6, 1, 6, 1, 6, 3, 6, 348, 8, 6, 3, 6, - 350, 8, 6, 1, 7, 1, 7, 3, 7, 354, 8, 7, 1, 8, 1, 8, 3, 8, 358, 8, 8, 1, - 8, 1, 8, 3, 8, 362, 8, 8, 1, 8, 3, 8, 365, 8, 8, 1, 9, 1, 9, 1, 9, 1, 10, - 1, 10, 3, 10, 372, 8, 10, 1, 10, 1, 10, 1, 11, 1, 11, 3, 11, 378, 8, 11, - 1, 11, 1, 11, 1, 11, 1, 11, 3, 11, 384, 8, 11, 1, 11, 1, 11, 1, 11, 3, - 11, 389, 8, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, - 398, 8, 11, 10, 11, 12, 11, 401, 9, 11, 1, 11, 1, 11, 1, 11, 3, 11, 406, - 8, 11, 1, 12, 1, 12, 3, 12, 410, 8, 12, 1, 12, 1, 12, 3, 12, 414, 8, 12, - 1, 12, 3, 12, 417, 8, 12, 1, 13, 1, 13, 3, 13, 421, 8, 13, 1, 13, 1, 13, - 1, 13, 1, 13, 3, 13, 427, 8, 13, 1, 13, 1, 13, 1, 13, 3, 13, 432, 8, 13, - 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 439, 8, 13, 10, 13, 12, 13, 442, - 9, 13, 1, 13, 1, 13, 5, 13, 446, 8, 13, 10, 13, 12, 13, 449, 9, 13, 1, - 13, 1, 13, 1, 13, 3, 13, 454, 8, 13, 1, 13, 1, 13, 3, 13, 458, 8, 13, 1, - 
14, 1, 14, 3, 14, 462, 8, 14, 1, 14, 5, 14, 465, 8, 14, 10, 14, 12, 14, - 468, 9, 14, 1, 15, 4, 15, 471, 8, 15, 11, 15, 12, 15, 472, 1, 15, 1, 15, - 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 485, 8, - 15, 1, 16, 1, 16, 3, 16, 489, 8, 16, 1, 16, 1, 16, 1, 16, 3, 16, 494, 8, - 16, 1, 16, 3, 16, 497, 8, 16, 1, 16, 3, 16, 500, 8, 16, 1, 16, 3, 16, 503, - 8, 16, 1, 16, 1, 16, 3, 16, 507, 8, 16, 1, 16, 3, 16, 510, 8, 16, 1, 16, - 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, - 16, 3, 16, 524, 8, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 3, 16, 531, 8, - 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 3, 16, 538, 8, 16, 3, 16, 540, 8, - 16, 1, 17, 3, 17, 543, 8, 17, 1, 17, 1, 17, 1, 18, 1, 18, 3, 18, 549, 8, - 18, 1, 18, 1, 18, 1, 18, 3, 18, 554, 8, 18, 1, 18, 1, 18, 1, 18, 1, 18, - 5, 18, 560, 8, 18, 10, 18, 12, 18, 563, 9, 18, 1, 18, 1, 18, 3, 18, 567, - 8, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, - 18, 1, 18, 5, 18, 580, 8, 18, 10, 18, 12, 18, 583, 9, 18, 1, 18, 1, 18, - 1, 18, 3, 18, 588, 8, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 5, - 19, 596, 8, 19, 10, 19, 12, 19, 599, 9, 19, 1, 19, 1, 19, 3, 19, 603, 8, - 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 613, - 8, 19, 1, 19, 1, 19, 5, 19, 617, 8, 19, 10, 19, 12, 19, 620, 9, 19, 1, - 19, 3, 19, 623, 8, 19, 1, 19, 1, 19, 1, 19, 3, 19, 628, 8, 19, 3, 19, 630, - 8, 19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 3, 21, 638, 8, 21, 1, - 21, 1, 21, 1, 21, 1, 21, 3, 21, 644, 8, 21, 1, 21, 1, 21, 1, 21, 3, 21, - 649, 8, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 656, 8, 21, 1, 21, - 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 5, 21, 665, 8, 21, 10, 21, 12, - 21, 668, 9, 21, 3, 21, 670, 8, 21, 3, 21, 672, 8, 21, 1, 21, 1, 21, 1, - 21, 1, 21, 1, 21, 3, 21, 679, 8, 21, 1, 21, 1, 21, 3, 21, 683, 8, 21, 1, - 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 690, 8, 21, 1, 21, 1, 21, 4, 21, - 694, 8, 21, 11, 21, 12, 21, 695, 1, 21, 1, 21, 1, 22, 1, 22, 3, 22, 702, - 8, 22, 1, 22, 1, 22, 1, 22, 1, 22, 3, 22, 708, 8, 22, 1, 22, 1, 22, 1, - 22, 3, 22, 713, 8, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 720, 8, - 22, 10, 22, 12, 22, 723, 9, 22, 1, 22, 1, 22, 3, 22, 727, 8, 22, 1, 22, - 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 3, 23, 738, 8, - 23, 1, 23, 1, 23, 1, 23, 3, 23, 743, 8, 23, 1, 23, 1, 23, 1, 23, 1, 23, - 1, 23, 1, 23, 1, 23, 5, 23, 752, 8, 23, 10, 23, 12, 23, 755, 9, 23, 1, - 23, 1, 23, 3, 23, 759, 8, 23, 1, 24, 1, 24, 3, 24, 763, 8, 24, 1, 24, 1, - 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, - 5, 24, 777, 8, 24, 10, 24, 12, 24, 780, 9, 24, 1, 25, 1, 25, 1, 25, 1, - 25, 1, 25, 5, 25, 787, 8, 25, 10, 25, 12, 25, 790, 9, 25, 1, 25, 1, 25, - 3, 25, 794, 8, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 3, 26, 802, - 8, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 5, 27, 812, - 8, 27, 10, 27, 12, 27, 815, 9, 27, 1, 27, 1, 27, 3, 27, 819, 8, 27, 1, - 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 3, 28, 827, 8, 28, 1, 28, 1, 28, - 1, 28, 1, 28, 1, 28, 3, 28, 834, 8, 28, 1, 28, 3, 28, 837, 8, 28, 1, 29, - 3, 29, 840, 8, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 3, 29, 847, 8, 29, - 1, 29, 3, 29, 850, 8, 29, 1, 29, 3, 29, 853, 8, 29, 1, 29, 3, 29, 856, - 8, 29, 1, 30, 1, 30, 3, 30, 860, 8, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, - 31, 1, 31, 3, 31, 868, 8, 31, 1, 31, 1, 31, 1, 31, 3, 31, 873, 8, 31, 1, - 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 883, 8, 32, - 1, 32, 1, 32, 1, 32, 3, 32, 888, 8, 32, 1, 32, 1, 32, 1, 
32, 1, 32, 1, - 32, 1, 32, 1, 32, 3, 32, 897, 8, 32, 1, 32, 1, 32, 1, 32, 5, 32, 902, 8, - 32, 10, 32, 12, 32, 905, 9, 32, 1, 32, 3, 32, 908, 8, 32, 1, 32, 1, 32, - 3, 32, 912, 8, 32, 1, 32, 3, 32, 915, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, - 5, 32, 921, 8, 32, 10, 32, 12, 32, 924, 9, 32, 1, 32, 1, 32, 1, 32, 1, - 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 936, 8, 32, 1, 32, - 3, 32, 939, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 947, - 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 4, 32, 954, 8, 32, 11, 32, 12, - 32, 955, 1, 32, 1, 32, 3, 32, 960, 8, 32, 1, 32, 1, 32, 1, 32, 3, 32, 965, - 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, - 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, - 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 995, 8, - 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, - 3, 32, 1007, 8, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1012, 8, 32, 1, 32, 1, - 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1024, - 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1030, 8, 32, 1, 32, 1, 32, 1, - 32, 1, 32, 1, 32, 3, 32, 1037, 8, 32, 1, 32, 1, 32, 3, 32, 1041, 8, 32, - 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 5, 32, 1049, 8, 32, 10, 32, 12, - 32, 1052, 9, 32, 3, 32, 1054, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, - 1060, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1066, 8, 32, 1, 32, 1, - 32, 1, 32, 1, 32, 1, 32, 5, 32, 1073, 8, 32, 10, 32, 12, 32, 1076, 9, 32, - 3, 32, 1078, 8, 32, 1, 32, 1, 32, 3, 32, 1082, 8, 32, 5, 32, 1084, 8, 32, - 10, 32, 12, 32, 1087, 9, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, - 3, 33, 1095, 8, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, - 35, 5, 35, 1105, 8, 35, 10, 35, 12, 35, 1108, 9, 35, 1, 35, 1, 35, 1, 36, - 1, 36, 1, 36, 1, 36, 5, 36, 1116, 8, 36, 10, 36, 12, 36, 1119, 9, 36, 1, - 37, 3, 37, 1122, 8, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 3, 37, 1129, - 8, 37, 1, 37, 1, 37, 1, 37, 1, 37, 3, 37, 1135, 8, 37, 1, 37, 1, 37, 1, - 37, 3, 37, 1140, 8, 37, 1, 37, 1, 37, 1, 37, 1, 37, 5, 37, 1146, 8, 37, - 10, 37, 12, 37, 1149, 9, 37, 1, 37, 1, 37, 3, 37, 1153, 8, 37, 1, 37, 1, - 37, 3, 37, 1157, 8, 37, 1, 37, 3, 37, 1160, 8, 37, 1, 37, 1, 37, 3, 37, - 1164, 8, 37, 1, 37, 3, 37, 1167, 8, 37, 1, 38, 1, 38, 1, 38, 1, 38, 5, - 38, 1173, 8, 38, 10, 38, 12, 38, 1176, 9, 38, 1, 39, 1, 39, 1, 39, 1, 39, - 1, 39, 1, 39, 5, 39, 1184, 8, 39, 10, 39, 12, 39, 1187, 9, 39, 1, 39, 1, - 39, 1, 39, 3, 39, 1192, 8, 39, 3, 39, 1194, 8, 39, 1, 39, 1, 39, 1, 39, - 1, 39, 1, 39, 1, 39, 3, 39, 1202, 8, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, - 39, 3, 39, 1209, 8, 39, 1, 39, 1, 39, 1, 39, 5, 39, 1214, 8, 39, 10, 39, - 12, 39, 1217, 9, 39, 1, 39, 1, 39, 3, 39, 1221, 8, 39, 3, 39, 1223, 8, - 39, 1, 40, 1, 40, 1, 40, 1, 40, 3, 40, 1229, 8, 40, 1, 40, 1, 40, 1, 40, - 1, 40, 1, 40, 1, 40, 1, 40, 3, 40, 1238, 8, 40, 1, 41, 1, 41, 1, 41, 3, - 41, 1243, 8, 41, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 3, 42, 1250, 8, 42, - 1, 42, 1, 42, 3, 42, 1254, 8, 42, 3, 42, 1256, 8, 42, 1, 43, 3, 43, 1259, - 8, 43, 1, 43, 1, 43, 1, 43, 1, 43, 5, 43, 1265, 8, 43, 10, 43, 12, 43, - 1268, 9, 43, 1, 43, 3, 43, 1271, 8, 43, 1, 43, 3, 43, 1274, 8, 43, 1, 44, - 1, 44, 1, 44, 1, 44, 3, 44, 1280, 8, 44, 5, 44, 1282, 8, 44, 10, 44, 12, - 44, 1285, 9, 44, 1, 45, 1, 45, 3, 45, 1289, 8, 45, 1, 45, 1, 45, 1, 45, - 5, 45, 1294, 8, 45, 10, 45, 12, 45, 1297, 9, 45, 1, 45, 1, 45, 1, 45, 1, - 45, 5, 45, 1303, 8, 45, 10, 45, 12, 45, 1306, 9, 45, 1, 45, 3, 45, 1309, - 8, 45, 3, 45, 1311, 8, 45, 1, 45, 
1, 45, 3, 45, 1315, 8, 45, 1, 45, 1, - 45, 1, 45, 1, 45, 1, 45, 5, 45, 1322, 8, 45, 10, 45, 12, 45, 1325, 9, 45, - 1, 45, 1, 45, 3, 45, 1329, 8, 45, 3, 45, 1331, 8, 45, 1, 45, 1, 45, 1, - 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 5, 45, 1342, 8, 45, 10, 45, - 12, 45, 1345, 9, 45, 3, 45, 1347, 8, 45, 1, 45, 3, 45, 1350, 8, 45, 1, - 46, 1, 46, 1, 47, 3, 47, 1355, 8, 47, 1, 47, 1, 47, 3, 47, 1359, 8, 47, - 1, 47, 3, 47, 1362, 8, 47, 1, 48, 3, 48, 1365, 8, 48, 1, 48, 1, 48, 1, - 48, 3, 48, 1370, 8, 48, 1, 48, 1, 48, 3, 48, 1374, 8, 48, 1, 48, 4, 48, - 1377, 8, 48, 11, 48, 12, 48, 1378, 1, 48, 3, 48, 1382, 8, 48, 1, 48, 3, - 48, 1385, 8, 48, 1, 49, 1, 49, 1, 49, 3, 49, 1390, 8, 49, 1, 49, 1, 49, - 3, 49, 1394, 8, 49, 1, 49, 3, 49, 1397, 8, 49, 1, 49, 1, 49, 1, 49, 1, - 49, 1, 49, 3, 49, 1404, 8, 49, 1, 49, 1, 49, 1, 49, 3, 49, 1409, 8, 49, - 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 5, 49, 1416, 8, 49, 10, 49, 12, 49, - 1419, 9, 49, 1, 49, 1, 49, 3, 49, 1423, 8, 49, 1, 49, 3, 49, 1426, 8, 49, - 1, 49, 1, 49, 1, 49, 1, 49, 5, 49, 1432, 8, 49, 10, 49, 12, 49, 1435, 9, - 49, 1, 49, 3, 49, 1438, 8, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, - 3, 49, 1446, 8, 49, 1, 49, 3, 49, 1449, 8, 49, 3, 49, 1451, 8, 49, 1, 50, - 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 3, 50, 1460, 8, 50, 1, 50, 3, - 50, 1463, 8, 50, 3, 50, 1465, 8, 50, 1, 51, 1, 51, 3, 51, 1469, 8, 51, - 1, 51, 1, 51, 3, 51, 1473, 8, 51, 1, 51, 1, 51, 3, 51, 1477, 8, 51, 1, - 51, 3, 51, 1480, 8, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, - 5, 52, 1489, 8, 52, 10, 52, 12, 52, 1492, 9, 52, 1, 52, 1, 52, 3, 52, 1496, - 8, 52, 1, 53, 1, 53, 3, 53, 1500, 8, 53, 1, 53, 1, 53, 3, 53, 1504, 8, - 53, 1, 54, 3, 54, 1507, 8, 54, 1, 54, 1, 54, 1, 54, 3, 54, 1512, 8, 54, - 1, 54, 1, 54, 1, 54, 1, 54, 3, 54, 1518, 8, 54, 1, 54, 1, 54, 1, 54, 1, - 54, 1, 54, 3, 54, 1525, 8, 54, 1, 54, 1, 54, 1, 54, 5, 54, 1530, 8, 54, - 10, 54, 12, 54, 1533, 9, 54, 1, 54, 1, 54, 1, 54, 1, 54, 5, 54, 1539, 8, - 54, 10, 54, 12, 54, 1542, 9, 54, 1, 54, 3, 54, 1545, 8, 54, 3, 54, 1547, - 8, 54, 1, 54, 1, 54, 3, 54, 1551, 8, 54, 1, 54, 3, 54, 1554, 8, 54, 1, - 55, 1, 55, 1, 55, 1, 55, 5, 55, 1560, 8, 55, 10, 55, 12, 55, 1563, 9, 55, - 1, 55, 1, 55, 1, 56, 3, 56, 1568, 8, 56, 1, 56, 1, 56, 1, 56, 3, 56, 1573, - 8, 56, 1, 56, 1, 56, 1, 56, 1, 56, 3, 56, 1579, 8, 56, 1, 56, 1, 56, 1, - 56, 1, 56, 1, 56, 3, 56, 1586, 8, 56, 1, 56, 1, 56, 1, 56, 5, 56, 1591, - 8, 56, 10, 56, 12, 56, 1594, 9, 56, 1, 56, 1, 56, 3, 56, 1598, 8, 56, 1, - 56, 3, 56, 1601, 8, 56, 1, 56, 3, 56, 1604, 8, 56, 1, 56, 3, 56, 1607, - 8, 56, 1, 57, 1, 57, 1, 57, 3, 57, 1612, 8, 57, 1, 57, 1, 57, 1, 57, 3, - 57, 1617, 8, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 3, 57, 1624, 8, 57, - 1, 58, 1, 58, 3, 58, 1628, 8, 58, 1, 58, 1, 58, 3, 58, 1632, 8, 58, 1, - 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 3, 60, 1642, 8, 60, - 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 5, 60, 1649, 8, 60, 10, 60, 12, 60, - 1652, 9, 60, 3, 60, 1654, 8, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 5, - 60, 1661, 8, 60, 10, 60, 12, 60, 1664, 9, 60, 1, 60, 3, 60, 1667, 8, 60, - 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 3, 61, 1675, 8, 61, 1, 61, 1, - 61, 1, 61, 1, 61, 1, 61, 5, 61, 1682, 8, 61, 10, 61, 12, 61, 1685, 9, 61, - 3, 61, 1687, 8, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 5, 61, 1694, 8, - 61, 10, 61, 12, 61, 1697, 9, 61, 3, 61, 1699, 8, 61, 1, 61, 3, 61, 1702, - 8, 61, 1, 61, 3, 61, 1705, 8, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, - 62, 1, 62, 1, 62, 3, 62, 1715, 8, 62, 3, 62, 1717, 8, 62, 1, 63, 1, 63, - 1, 63, 1, 63, 1, 63, 1, 
63, 1, 63, 3, 63, 1726, 8, 63, 1, 64, 1, 64, 1, - 64, 1, 64, 1, 64, 5, 64, 1733, 8, 64, 10, 64, 12, 64, 1736, 9, 64, 1, 64, - 3, 64, 1739, 8, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 3, 65, 1746, 8, - 65, 1, 65, 1, 65, 1, 65, 5, 65, 1751, 8, 65, 10, 65, 12, 65, 1754, 9, 65, - 1, 65, 3, 65, 1757, 8, 65, 1, 65, 1, 65, 3, 65, 1761, 8, 65, 1, 66, 1, - 66, 1, 66, 1, 66, 1, 66, 5, 66, 1768, 8, 66, 10, 66, 12, 66, 1771, 9, 66, - 1, 66, 3, 66, 1774, 8, 66, 1, 66, 1, 66, 3, 66, 1778, 8, 66, 1, 66, 1, - 66, 1, 66, 3, 66, 1783, 8, 66, 1, 67, 1, 67, 3, 67, 1787, 8, 67, 1, 67, - 1, 67, 1, 67, 5, 67, 1792, 8, 67, 10, 67, 12, 67, 1795, 9, 67, 1, 68, 1, - 68, 1, 68, 1, 68, 1, 68, 5, 68, 1802, 8, 68, 10, 68, 12, 68, 1805, 9, 68, - 1, 69, 1, 69, 1, 69, 1, 69, 3, 69, 1811, 8, 69, 1, 70, 1, 70, 1, 70, 3, - 70, 1816, 8, 70, 1, 70, 3, 70, 1819, 8, 70, 1, 70, 1, 70, 3, 70, 1823, - 8, 70, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, - 72, 1, 72, 1, 72, 3, 72, 1837, 8, 72, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, - 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 3, 73, 1849, 8, 73, 1, 74, 1, 74, 1, - 74, 1, 74, 1, 74, 1, 74, 1, 74, 3, 74, 1858, 8, 74, 1, 75, 1, 75, 1, 75, - 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1867, 8, 75, 1, 75, 1, 75, 3, 75, 1871, - 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1881, - 8, 75, 1, 75, 3, 75, 1884, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, - 75, 1, 75, 3, 75, 1893, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, - 1, 75, 3, 75, 1902, 8, 75, 1, 75, 3, 75, 1905, 8, 75, 1, 75, 1, 75, 1, - 75, 1, 75, 3, 75, 1911, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, - 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1925, 8, 75, 1, 75, 1, - 75, 3, 75, 1929, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, - 1, 75, 1, 75, 3, 75, 1940, 8, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1945, 8, - 75, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 1, 77, 1, 78, 1, 78, 1, 78, 4, 78, - 1956, 8, 78, 11, 78, 12, 78, 1957, 1, 79, 1, 79, 1, 79, 4, 79, 1963, 8, - 79, 11, 79, 12, 79, 1964, 1, 80, 1, 80, 1, 80, 1, 80, 1, 81, 1, 81, 3, - 81, 1973, 8, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1978, 8, 81, 5, 81, 1980, - 8, 81, 10, 81, 12, 81, 1983, 9, 81, 1, 82, 1, 82, 1, 83, 1, 83, 1, 84, - 1, 84, 1, 85, 1, 85, 1, 86, 1, 86, 3, 86, 1995, 8, 86, 1, 87, 1, 87, 1, - 88, 1, 88, 1, 89, 1, 89, 1, 90, 1, 90, 1, 91, 1, 91, 1, 92, 1, 92, 1, 93, - 1, 93, 1, 94, 1, 94, 1, 95, 1, 95, 1, 96, 1, 96, 1, 97, 1, 97, 1, 98, 1, - 98, 1, 99, 1, 99, 1, 100, 1, 100, 1, 101, 1, 101, 1, 102, 1, 102, 1, 103, - 1, 103, 1, 104, 1, 104, 1, 105, 1, 105, 1, 106, 1, 106, 1, 107, 1, 107, - 1, 108, 1, 108, 1, 109, 1, 109, 1, 110, 1, 110, 1, 111, 1, 111, 1, 112, - 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 3, 112, 2054, 8, 112, 1, - 112, 2, 440, 472, 1, 64, 113, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, - 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, - 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, - 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, - 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, - 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, - 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, - 216, 218, 220, 222, 224, 0, 28, 3, 0, 58, 58, 69, 69, 82, 82, 2, 0, 47, - 47, 66, 66, 1, 0, 133, 134, 2, 0, 146, 146, 171, 171, 1, 0, 8, 9, 2, 0, - 59, 59, 141, 141, 2, 0, 56, 56, 104, 104, 2, 0, 58, 58, 82, 82, 5, 0, 25, - 25, 72, 72, 81, 81, 122, 122, 126, 126, 4, 0, 84, 84, 132, 132, 
138, 138, - 145, 145, 2, 0, 7, 7, 12, 13, 1, 0, 14, 17, 1, 0, 18, 21, 4, 0, 77, 77, - 97, 97, 99, 99, 118, 118, 3, 0, 25, 25, 72, 72, 126, 126, 5, 0, 52, 54, - 104, 104, 172, 173, 186, 186, 188, 189, 2, 0, 29, 29, 62, 62, 3, 0, 128, - 128, 154, 154, 179, 179, 2, 0, 5, 5, 106, 106, 1, 0, 176, 177, 2, 0, 34, - 34, 60, 60, 2, 0, 151, 151, 162, 162, 2, 0, 159, 159, 166, 166, 2, 0, 160, - 160, 167, 168, 2, 0, 161, 161, 163, 163, 2, 0, 8, 10, 102, 102, 2, 0, 185, - 185, 188, 188, 2, 0, 25, 123, 125, 180, 2338, 0, 229, 1, 0, 0, 0, 2, 237, - 1, 0, 0, 0, 4, 263, 1, 0, 0, 0, 6, 291, 1, 0, 0, 0, 8, 323, 1, 0, 0, 0, - 10, 333, 1, 0, 0, 0, 12, 341, 1, 0, 0, 0, 14, 351, 1, 0, 0, 0, 16, 355, - 1, 0, 0, 0, 18, 366, 1, 0, 0, 0, 20, 369, 1, 0, 0, 0, 22, 375, 1, 0, 0, - 0, 24, 409, 1, 0, 0, 0, 26, 418, 1, 0, 0, 0, 28, 459, 1, 0, 0, 0, 30, 470, - 1, 0, 0, 0, 32, 488, 1, 0, 0, 0, 34, 542, 1, 0, 0, 0, 36, 548, 1, 0, 0, - 0, 38, 589, 1, 0, 0, 0, 40, 631, 1, 0, 0, 0, 42, 635, 1, 0, 0, 0, 44, 699, - 1, 0, 0, 0, 46, 731, 1, 0, 0, 0, 48, 760, 1, 0, 0, 0, 50, 781, 1, 0, 0, - 0, 52, 795, 1, 0, 0, 0, 54, 806, 1, 0, 0, 0, 56, 826, 1, 0, 0, 0, 58, 839, - 1, 0, 0, 0, 60, 857, 1, 0, 0, 0, 62, 863, 1, 0, 0, 0, 64, 964, 1, 0, 0, - 0, 66, 1088, 1, 0, 0, 0, 68, 1098, 1, 0, 0, 0, 70, 1100, 1, 0, 0, 0, 72, - 1111, 1, 0, 0, 0, 74, 1121, 1, 0, 0, 0, 76, 1168, 1, 0, 0, 0, 78, 1177, - 1, 0, 0, 0, 80, 1224, 1, 0, 0, 0, 82, 1242, 1, 0, 0, 0, 84, 1244, 1, 0, - 0, 0, 86, 1258, 1, 0, 0, 0, 88, 1275, 1, 0, 0, 0, 90, 1349, 1, 0, 0, 0, - 92, 1351, 1, 0, 0, 0, 94, 1354, 1, 0, 0, 0, 96, 1364, 1, 0, 0, 0, 98, 1450, - 1, 0, 0, 0, 100, 1464, 1, 0, 0, 0, 102, 1479, 1, 0, 0, 0, 104, 1495, 1, - 0, 0, 0, 106, 1503, 1, 0, 0, 0, 108, 1506, 1, 0, 0, 0, 110, 1555, 1, 0, - 0, 0, 112, 1567, 1, 0, 0, 0, 114, 1611, 1, 0, 0, 0, 116, 1625, 1, 0, 0, - 0, 118, 1633, 1, 0, 0, 0, 120, 1639, 1, 0, 0, 0, 122, 1670, 1, 0, 0, 0, - 124, 1706, 1, 0, 0, 0, 126, 1718, 1, 0, 0, 0, 128, 1727, 1, 0, 0, 0, 130, - 1742, 1, 0, 0, 0, 132, 1762, 1, 0, 0, 0, 134, 1784, 1, 0, 0, 0, 136, 1796, - 1, 0, 0, 0, 138, 1806, 1, 0, 0, 0, 140, 1812, 1, 0, 0, 0, 142, 1824, 1, - 0, 0, 0, 144, 1836, 1, 0, 0, 0, 146, 1848, 1, 0, 0, 0, 148, 1857, 1, 0, - 0, 0, 150, 1944, 1, 0, 0, 0, 152, 1946, 1, 0, 0, 0, 154, 1949, 1, 0, 0, - 0, 156, 1952, 1, 0, 0, 0, 158, 1959, 1, 0, 0, 0, 160, 1966, 1, 0, 0, 0, - 162, 1970, 1, 0, 0, 0, 164, 1984, 1, 0, 0, 0, 166, 1986, 1, 0, 0, 0, 168, - 1988, 1, 0, 0, 0, 170, 1990, 1, 0, 0, 0, 172, 1994, 1, 0, 0, 0, 174, 1996, - 1, 0, 0, 0, 176, 1998, 1, 0, 0, 0, 178, 2000, 1, 0, 0, 0, 180, 2002, 1, - 0, 0, 0, 182, 2004, 1, 0, 0, 0, 184, 2006, 1, 0, 0, 0, 186, 2008, 1, 0, - 0, 0, 188, 2010, 1, 0, 0, 0, 190, 2012, 1, 0, 0, 0, 192, 2014, 1, 0, 0, - 0, 194, 2016, 1, 0, 0, 0, 196, 2018, 1, 0, 0, 0, 198, 2020, 1, 0, 0, 0, - 200, 2022, 1, 0, 0, 0, 202, 2024, 1, 0, 0, 0, 204, 2026, 1, 0, 0, 0, 206, - 2028, 1, 0, 0, 0, 208, 2030, 1, 0, 0, 0, 210, 2032, 1, 0, 0, 0, 212, 2034, - 1, 0, 0, 0, 214, 2036, 1, 0, 0, 0, 216, 2038, 1, 0, 0, 0, 218, 2040, 1, - 0, 0, 0, 220, 2042, 1, 0, 0, 0, 222, 2044, 1, 0, 0, 0, 224, 2053, 1, 0, - 0, 0, 226, 228, 3, 2, 1, 0, 227, 226, 1, 0, 0, 0, 228, 231, 1, 0, 0, 0, - 229, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 232, 1, 0, 0, 0, 231, - 229, 1, 0, 0, 0, 232, 233, 5, 0, 0, 1, 233, 1, 1, 0, 0, 0, 234, 236, 5, - 1, 0, 0, 235, 234, 1, 0, 0, 0, 236, 239, 1, 0, 0, 0, 237, 235, 1, 0, 0, - 0, 237, 238, 1, 0, 0, 0, 238, 240, 1, 0, 0, 0, 239, 237, 1, 0, 0, 0, 240, - 249, 3, 4, 2, 0, 241, 243, 5, 1, 0, 0, 242, 241, 1, 0, 0, 0, 243, 244, - 1, 0, 0, 0, 244, 242, 1, 
0, 0, 0, 244, 245, 1, 0, 0, 0, 245, 246, 1, 0, - 0, 0, 246, 248, 3, 4, 2, 0, 247, 242, 1, 0, 0, 0, 248, 251, 1, 0, 0, 0, - 249, 247, 1, 0, 0, 0, 249, 250, 1, 0, 0, 0, 250, 255, 1, 0, 0, 0, 251, - 249, 1, 0, 0, 0, 252, 254, 5, 1, 0, 0, 253, 252, 1, 0, 0, 0, 254, 257, - 1, 0, 0, 0, 255, 253, 1, 0, 0, 0, 255, 256, 1, 0, 0, 0, 256, 3, 1, 0, 0, - 0, 257, 255, 1, 0, 0, 0, 258, 261, 5, 71, 0, 0, 259, 260, 5, 114, 0, 0, - 260, 262, 5, 111, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, - 264, 1, 0, 0, 0, 263, 258, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 289, - 1, 0, 0, 0, 265, 290, 3, 6, 3, 0, 266, 290, 3, 8, 4, 0, 267, 290, 3, 10, - 5, 0, 268, 290, 3, 12, 6, 0, 269, 290, 3, 14, 7, 0, 270, 290, 3, 22, 11, - 0, 271, 290, 3, 26, 13, 0, 272, 290, 3, 42, 21, 0, 273, 290, 3, 44, 22, - 0, 274, 290, 3, 46, 23, 0, 275, 290, 3, 56, 28, 0, 276, 290, 3, 58, 29, - 0, 277, 290, 3, 60, 30, 0, 278, 290, 3, 62, 31, 0, 279, 290, 3, 74, 37, - 0, 280, 290, 3, 80, 40, 0, 281, 290, 3, 84, 42, 0, 282, 290, 3, 20, 10, - 0, 283, 290, 3, 16, 8, 0, 284, 290, 3, 18, 9, 0, 285, 290, 3, 86, 43, 0, - 286, 290, 3, 108, 54, 0, 287, 290, 3, 112, 56, 0, 288, 290, 3, 116, 58, - 0, 289, 265, 1, 0, 0, 0, 289, 266, 1, 0, 0, 0, 289, 267, 1, 0, 0, 0, 289, - 268, 1, 0, 0, 0, 289, 269, 1, 0, 0, 0, 289, 270, 1, 0, 0, 0, 289, 271, - 1, 0, 0, 0, 289, 272, 1, 0, 0, 0, 289, 273, 1, 0, 0, 0, 289, 274, 1, 0, - 0, 0, 289, 275, 1, 0, 0, 0, 289, 276, 1, 0, 0, 0, 289, 277, 1, 0, 0, 0, - 289, 278, 1, 0, 0, 0, 289, 279, 1, 0, 0, 0, 289, 280, 1, 0, 0, 0, 289, - 281, 1, 0, 0, 0, 289, 282, 1, 0, 0, 0, 289, 283, 1, 0, 0, 0, 289, 284, - 1, 0, 0, 0, 289, 285, 1, 0, 0, 0, 289, 286, 1, 0, 0, 0, 289, 287, 1, 0, - 0, 0, 289, 288, 1, 0, 0, 0, 290, 5, 1, 0, 0, 0, 291, 292, 5, 30, 0, 0, - 292, 296, 5, 132, 0, 0, 293, 294, 3, 182, 91, 0, 294, 295, 5, 2, 0, 0, - 295, 297, 1, 0, 0, 0, 296, 293, 1, 0, 0, 0, 296, 297, 1, 0, 0, 0, 297, - 298, 1, 0, 0, 0, 298, 321, 3, 184, 92, 0, 299, 309, 5, 121, 0, 0, 300, - 301, 5, 136, 0, 0, 301, 310, 3, 184, 92, 0, 302, 304, 5, 46, 0, 0, 303, - 302, 1, 0, 0, 0, 303, 304, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, - 3, 188, 94, 0, 306, 307, 5, 136, 0, 0, 307, 308, 3, 188, 94, 0, 308, 310, - 1, 0, 0, 0, 309, 300, 1, 0, 0, 0, 309, 303, 1, 0, 0, 0, 310, 322, 1, 0, - 0, 0, 311, 313, 5, 27, 0, 0, 312, 314, 5, 46, 0, 0, 313, 312, 1, 0, 0, - 0, 313, 314, 1, 0, 0, 0, 314, 315, 1, 0, 0, 0, 315, 322, 3, 28, 14, 0, - 316, 318, 5, 63, 0, 0, 317, 319, 5, 46, 0, 0, 318, 317, 1, 0, 0, 0, 318, - 319, 1, 0, 0, 0, 319, 320, 1, 0, 0, 0, 320, 322, 3, 188, 94, 0, 321, 299, - 1, 0, 0, 0, 321, 311, 1, 0, 0, 0, 321, 316, 1, 0, 0, 0, 322, 7, 1, 0, 0, - 0, 323, 331, 5, 31, 0, 0, 324, 332, 3, 182, 91, 0, 325, 326, 3, 182, 91, - 0, 326, 327, 5, 2, 0, 0, 327, 329, 1, 0, 0, 0, 328, 325, 1, 0, 0, 0, 328, - 329, 1, 0, 0, 0, 329, 330, 1, 0, 0, 0, 330, 332, 3, 186, 93, 0, 331, 324, - 1, 0, 0, 0, 331, 328, 1, 0, 0, 0, 331, 332, 1, 0, 0, 0, 332, 9, 1, 0, 0, - 0, 333, 335, 5, 35, 0, 0, 334, 336, 5, 55, 0, 0, 335, 334, 1, 0, 0, 0, - 335, 336, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 337, 338, 3, 64, 32, 0, 338, - 339, 5, 33, 0, 0, 339, 340, 3, 182, 91, 0, 340, 11, 1, 0, 0, 0, 341, 343, - 5, 38, 0, 0, 342, 344, 7, 0, 0, 0, 343, 342, 1, 0, 0, 0, 343, 344, 1, 0, - 0, 0, 344, 349, 1, 0, 0, 0, 345, 347, 5, 137, 0, 0, 346, 348, 3, 208, 104, - 0, 347, 346, 1, 0, 0, 0, 347, 348, 1, 0, 0, 0, 348, 350, 1, 0, 0, 0, 349, - 345, 1, 0, 0, 0, 349, 350, 1, 0, 0, 0, 350, 13, 1, 0, 0, 0, 351, 353, 7, - 1, 0, 0, 352, 354, 5, 137, 0, 0, 353, 352, 1, 0, 0, 0, 353, 354, 1, 0, - 0, 0, 354, 
15, 1, 0, 0, 0, 355, 357, 5, 126, 0, 0, 356, 358, 5, 137, 0, - 0, 357, 356, 1, 0, 0, 0, 357, 358, 1, 0, 0, 0, 358, 364, 1, 0, 0, 0, 359, - 361, 5, 136, 0, 0, 360, 362, 5, 129, 0, 0, 361, 360, 1, 0, 0, 0, 361, 362, - 1, 0, 0, 0, 362, 363, 1, 0, 0, 0, 363, 365, 3, 204, 102, 0, 364, 359, 1, - 0, 0, 0, 364, 365, 1, 0, 0, 0, 365, 17, 1, 0, 0, 0, 366, 367, 5, 129, 0, - 0, 367, 368, 3, 204, 102, 0, 368, 19, 1, 0, 0, 0, 369, 371, 5, 120, 0, - 0, 370, 372, 5, 129, 0, 0, 371, 370, 1, 0, 0, 0, 371, 372, 1, 0, 0, 0, - 372, 373, 1, 0, 0, 0, 373, 374, 3, 204, 102, 0, 374, 21, 1, 0, 0, 0, 375, - 377, 5, 50, 0, 0, 376, 378, 5, 140, 0, 0, 377, 376, 1, 0, 0, 0, 377, 378, - 1, 0, 0, 0, 378, 379, 1, 0, 0, 0, 379, 383, 5, 84, 0, 0, 380, 381, 5, 80, - 0, 0, 381, 382, 5, 102, 0, 0, 382, 384, 5, 70, 0, 0, 383, 380, 1, 0, 0, - 0, 383, 384, 1, 0, 0, 0, 384, 388, 1, 0, 0, 0, 385, 386, 3, 182, 91, 0, - 386, 387, 5, 2, 0, 0, 387, 389, 1, 0, 0, 0, 388, 385, 1, 0, 0, 0, 388, - 389, 1, 0, 0, 0, 389, 390, 1, 0, 0, 0, 390, 391, 3, 194, 97, 0, 391, 392, - 5, 107, 0, 0, 392, 393, 3, 184, 92, 0, 393, 394, 5, 3, 0, 0, 394, 399, - 3, 24, 12, 0, 395, 396, 5, 5, 0, 0, 396, 398, 3, 24, 12, 0, 397, 395, 1, - 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 397, 1, 0, 0, 0, 399, 400, 1, 0, 0, - 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, 405, 5, 4, 0, 0, 403, - 404, 5, 148, 0, 0, 404, 406, 3, 64, 32, 0, 405, 403, 1, 0, 0, 0, 405, 406, - 1, 0, 0, 0, 406, 23, 1, 0, 0, 0, 407, 410, 3, 188, 94, 0, 408, 410, 3, - 64, 32, 0, 409, 407, 1, 0, 0, 0, 409, 408, 1, 0, 0, 0, 410, 413, 1, 0, - 0, 0, 411, 412, 5, 45, 0, 0, 412, 414, 3, 190, 95, 0, 413, 411, 1, 0, 0, - 0, 413, 414, 1, 0, 0, 0, 414, 416, 1, 0, 0, 0, 415, 417, 3, 142, 71, 0, - 416, 415, 1, 0, 0, 0, 416, 417, 1, 0, 0, 0, 417, 25, 1, 0, 0, 0, 418, 420, - 5, 50, 0, 0, 419, 421, 7, 2, 0, 0, 420, 419, 1, 0, 0, 0, 420, 421, 1, 0, - 0, 0, 421, 422, 1, 0, 0, 0, 422, 426, 5, 132, 0, 0, 423, 424, 5, 80, 0, - 0, 424, 425, 5, 102, 0, 0, 425, 427, 5, 70, 0, 0, 426, 423, 1, 0, 0, 0, - 426, 427, 1, 0, 0, 0, 427, 431, 1, 0, 0, 0, 428, 429, 3, 182, 91, 0, 429, - 430, 5, 2, 0, 0, 430, 432, 1, 0, 0, 0, 431, 428, 1, 0, 0, 0, 431, 432, - 1, 0, 0, 0, 432, 433, 1, 0, 0, 0, 433, 457, 3, 184, 92, 0, 434, 435, 5, - 3, 0, 0, 435, 440, 3, 28, 14, 0, 436, 437, 5, 5, 0, 0, 437, 439, 3, 28, - 14, 0, 438, 436, 1, 0, 0, 0, 439, 442, 1, 0, 0, 0, 440, 441, 1, 0, 0, 0, - 440, 438, 1, 0, 0, 0, 441, 447, 1, 0, 0, 0, 442, 440, 1, 0, 0, 0, 443, - 444, 5, 5, 0, 0, 444, 446, 3, 36, 18, 0, 445, 443, 1, 0, 0, 0, 446, 449, - 1, 0, 0, 0, 447, 445, 1, 0, 0, 0, 447, 448, 1, 0, 0, 0, 448, 450, 1, 0, - 0, 0, 449, 447, 1, 0, 0, 0, 450, 453, 5, 4, 0, 0, 451, 452, 5, 150, 0, - 0, 452, 454, 5, 185, 0, 0, 453, 451, 1, 0, 0, 0, 453, 454, 1, 0, 0, 0, - 454, 458, 1, 0, 0, 0, 455, 456, 5, 33, 0, 0, 456, 458, 3, 86, 43, 0, 457, - 434, 1, 0, 0, 0, 457, 455, 1, 0, 0, 0, 458, 27, 1, 0, 0, 0, 459, 461, 3, - 188, 94, 0, 460, 462, 3, 30, 15, 0, 461, 460, 1, 0, 0, 0, 461, 462, 1, - 0, 0, 0, 462, 466, 1, 0, 0, 0, 463, 465, 3, 32, 16, 0, 464, 463, 1, 0, - 0, 0, 465, 468, 1, 0, 0, 0, 466, 464, 1, 0, 0, 0, 466, 467, 1, 0, 0, 0, - 467, 29, 1, 0, 0, 0, 468, 466, 1, 0, 0, 0, 469, 471, 3, 178, 89, 0, 470, - 469, 1, 0, 0, 0, 471, 472, 1, 0, 0, 0, 472, 473, 1, 0, 0, 0, 472, 470, - 1, 0, 0, 0, 473, 484, 1, 0, 0, 0, 474, 475, 5, 3, 0, 0, 475, 476, 3, 34, - 17, 0, 476, 477, 5, 4, 0, 0, 477, 485, 1, 0, 0, 0, 478, 479, 5, 3, 0, 0, - 479, 480, 3, 34, 17, 0, 480, 481, 5, 5, 0, 0, 481, 482, 3, 34, 17, 0, 482, - 483, 5, 4, 0, 0, 483, 485, 1, 0, 0, 0, 484, 474, 1, 0, 0, 0, 
484, 478, - 1, 0, 0, 0, 484, 485, 1, 0, 0, 0, 485, 31, 1, 0, 0, 0, 486, 487, 5, 49, - 0, 0, 487, 489, 3, 178, 89, 0, 488, 486, 1, 0, 0, 0, 488, 489, 1, 0, 0, - 0, 489, 539, 1, 0, 0, 0, 490, 491, 5, 113, 0, 0, 491, 493, 5, 95, 0, 0, - 492, 494, 3, 142, 71, 0, 493, 492, 1, 0, 0, 0, 493, 494, 1, 0, 0, 0, 494, - 496, 1, 0, 0, 0, 495, 497, 3, 40, 20, 0, 496, 495, 1, 0, 0, 0, 496, 497, - 1, 0, 0, 0, 497, 499, 1, 0, 0, 0, 498, 500, 5, 36, 0, 0, 499, 498, 1, 0, - 0, 0, 499, 500, 1, 0, 0, 0, 500, 540, 1, 0, 0, 0, 501, 503, 5, 102, 0, - 0, 502, 501, 1, 0, 0, 0, 502, 503, 1, 0, 0, 0, 503, 504, 1, 0, 0, 0, 504, - 507, 5, 104, 0, 0, 505, 507, 5, 140, 0, 0, 506, 502, 1, 0, 0, 0, 506, 505, - 1, 0, 0, 0, 507, 509, 1, 0, 0, 0, 508, 510, 3, 40, 20, 0, 509, 508, 1, - 0, 0, 0, 509, 510, 1, 0, 0, 0, 510, 540, 1, 0, 0, 0, 511, 512, 5, 44, 0, - 0, 512, 513, 5, 3, 0, 0, 513, 514, 3, 64, 32, 0, 514, 515, 5, 4, 0, 0, - 515, 540, 1, 0, 0, 0, 516, 523, 5, 56, 0, 0, 517, 524, 3, 34, 17, 0, 518, - 524, 3, 68, 34, 0, 519, 520, 5, 3, 0, 0, 520, 521, 3, 64, 32, 0, 521, 522, - 5, 4, 0, 0, 522, 524, 1, 0, 0, 0, 523, 517, 1, 0, 0, 0, 523, 518, 1, 0, - 0, 0, 523, 519, 1, 0, 0, 0, 524, 540, 1, 0, 0, 0, 525, 526, 5, 45, 0, 0, - 526, 540, 3, 190, 95, 0, 527, 540, 3, 38, 19, 0, 528, 529, 5, 169, 0, 0, - 529, 531, 5, 170, 0, 0, 530, 528, 1, 0, 0, 0, 530, 531, 1, 0, 0, 0, 531, - 532, 1, 0, 0, 0, 532, 533, 5, 33, 0, 0, 533, 534, 5, 3, 0, 0, 534, 535, - 3, 64, 32, 0, 535, 537, 5, 4, 0, 0, 536, 538, 7, 3, 0, 0, 537, 536, 1, - 0, 0, 0, 537, 538, 1, 0, 0, 0, 538, 540, 1, 0, 0, 0, 539, 490, 1, 0, 0, - 0, 539, 506, 1, 0, 0, 0, 539, 511, 1, 0, 0, 0, 539, 516, 1, 0, 0, 0, 539, - 525, 1, 0, 0, 0, 539, 527, 1, 0, 0, 0, 539, 530, 1, 0, 0, 0, 540, 33, 1, - 0, 0, 0, 541, 543, 7, 4, 0, 0, 542, 541, 1, 0, 0, 0, 542, 543, 1, 0, 0, - 0, 543, 544, 1, 0, 0, 0, 544, 545, 5, 186, 0, 0, 545, 35, 1, 0, 0, 0, 546, - 547, 5, 49, 0, 0, 547, 549, 3, 178, 89, 0, 548, 546, 1, 0, 0, 0, 548, 549, - 1, 0, 0, 0, 549, 587, 1, 0, 0, 0, 550, 551, 5, 113, 0, 0, 551, 554, 5, - 95, 0, 0, 552, 554, 5, 140, 0, 0, 553, 550, 1, 0, 0, 0, 553, 552, 1, 0, - 0, 0, 554, 555, 1, 0, 0, 0, 555, 556, 5, 3, 0, 0, 556, 561, 3, 24, 12, - 0, 557, 558, 5, 5, 0, 0, 558, 560, 3, 24, 12, 0, 559, 557, 1, 0, 0, 0, - 560, 563, 1, 0, 0, 0, 561, 559, 1, 0, 0, 0, 561, 562, 1, 0, 0, 0, 562, - 564, 1, 0, 0, 0, 563, 561, 1, 0, 0, 0, 564, 566, 5, 4, 0, 0, 565, 567, - 3, 40, 20, 0, 566, 565, 1, 0, 0, 0, 566, 567, 1, 0, 0, 0, 567, 588, 1, - 0, 0, 0, 568, 569, 5, 44, 0, 0, 569, 570, 5, 3, 0, 0, 570, 571, 3, 64, - 32, 0, 571, 572, 5, 4, 0, 0, 572, 588, 1, 0, 0, 0, 573, 574, 5, 74, 0, - 0, 574, 575, 5, 95, 0, 0, 575, 576, 5, 3, 0, 0, 576, 581, 3, 188, 94, 0, - 577, 578, 5, 5, 0, 0, 578, 580, 3, 188, 94, 0, 579, 577, 1, 0, 0, 0, 580, - 583, 1, 0, 0, 0, 581, 579, 1, 0, 0, 0, 581, 582, 1, 0, 0, 0, 582, 584, - 1, 0, 0, 0, 583, 581, 1, 0, 0, 0, 584, 585, 5, 4, 0, 0, 585, 586, 3, 38, - 19, 0, 586, 588, 1, 0, 0, 0, 587, 553, 1, 0, 0, 0, 587, 568, 1, 0, 0, 0, - 587, 573, 1, 0, 0, 0, 588, 37, 1, 0, 0, 0, 589, 590, 5, 117, 0, 0, 590, - 602, 3, 192, 96, 0, 591, 592, 5, 3, 0, 0, 592, 597, 3, 188, 94, 0, 593, - 594, 5, 5, 0, 0, 594, 596, 3, 188, 94, 0, 595, 593, 1, 0, 0, 0, 596, 599, - 1, 0, 0, 0, 597, 595, 1, 0, 0, 0, 597, 598, 1, 0, 0, 0, 598, 600, 1, 0, - 0, 0, 599, 597, 1, 0, 0, 0, 600, 601, 5, 4, 0, 0, 601, 603, 1, 0, 0, 0, - 602, 591, 1, 0, 0, 0, 602, 603, 1, 0, 0, 0, 603, 618, 1, 0, 0, 0, 604, - 605, 5, 107, 0, 0, 605, 612, 7, 5, 0, 0, 606, 607, 5, 131, 0, 0, 607, 613, - 7, 6, 0, 0, 608, 613, 5, 41, 0, 0, 609, 
613, 5, 123, 0, 0, 610, 611, 5, - 101, 0, 0, 611, 613, 5, 26, 0, 0, 612, 606, 1, 0, 0, 0, 612, 608, 1, 0, - 0, 0, 612, 609, 1, 0, 0, 0, 612, 610, 1, 0, 0, 0, 613, 617, 1, 0, 0, 0, - 614, 615, 5, 99, 0, 0, 615, 617, 3, 178, 89, 0, 616, 604, 1, 0, 0, 0, 616, - 614, 1, 0, 0, 0, 617, 620, 1, 0, 0, 0, 618, 616, 1, 0, 0, 0, 618, 619, - 1, 0, 0, 0, 619, 629, 1, 0, 0, 0, 620, 618, 1, 0, 0, 0, 621, 623, 5, 102, - 0, 0, 622, 621, 1, 0, 0, 0, 622, 623, 1, 0, 0, 0, 623, 624, 1, 0, 0, 0, - 624, 627, 5, 57, 0, 0, 625, 626, 5, 86, 0, 0, 626, 628, 7, 7, 0, 0, 627, - 625, 1, 0, 0, 0, 627, 628, 1, 0, 0, 0, 628, 630, 1, 0, 0, 0, 629, 622, - 1, 0, 0, 0, 629, 630, 1, 0, 0, 0, 630, 39, 1, 0, 0, 0, 631, 632, 5, 107, - 0, 0, 632, 633, 5, 48, 0, 0, 633, 634, 7, 8, 0, 0, 634, 41, 1, 0, 0, 0, - 635, 637, 5, 50, 0, 0, 636, 638, 7, 2, 0, 0, 637, 636, 1, 0, 0, 0, 637, - 638, 1, 0, 0, 0, 638, 639, 1, 0, 0, 0, 639, 643, 5, 138, 0, 0, 640, 641, - 5, 80, 0, 0, 641, 642, 5, 102, 0, 0, 642, 644, 5, 70, 0, 0, 643, 640, 1, - 0, 0, 0, 643, 644, 1, 0, 0, 0, 644, 648, 1, 0, 0, 0, 645, 646, 3, 182, - 91, 0, 646, 647, 5, 2, 0, 0, 647, 649, 1, 0, 0, 0, 648, 645, 1, 0, 0, 0, - 648, 649, 1, 0, 0, 0, 649, 650, 1, 0, 0, 0, 650, 655, 3, 196, 98, 0, 651, - 656, 5, 37, 0, 0, 652, 656, 5, 28, 0, 0, 653, 654, 5, 89, 0, 0, 654, 656, - 5, 105, 0, 0, 655, 651, 1, 0, 0, 0, 655, 652, 1, 0, 0, 0, 655, 653, 1, - 0, 0, 0, 655, 656, 1, 0, 0, 0, 656, 671, 1, 0, 0, 0, 657, 672, 5, 59, 0, - 0, 658, 672, 5, 88, 0, 0, 659, 669, 5, 141, 0, 0, 660, 661, 5, 105, 0, - 0, 661, 666, 3, 188, 94, 0, 662, 663, 5, 5, 0, 0, 663, 665, 3, 188, 94, - 0, 664, 662, 1, 0, 0, 0, 665, 668, 1, 0, 0, 0, 666, 664, 1, 0, 0, 0, 666, - 667, 1, 0, 0, 0, 667, 670, 1, 0, 0, 0, 668, 666, 1, 0, 0, 0, 669, 660, - 1, 0, 0, 0, 669, 670, 1, 0, 0, 0, 670, 672, 1, 0, 0, 0, 671, 657, 1, 0, - 0, 0, 671, 658, 1, 0, 0, 0, 671, 659, 1, 0, 0, 0, 672, 673, 1, 0, 0, 0, - 673, 674, 5, 107, 0, 0, 674, 678, 3, 184, 92, 0, 675, 676, 5, 73, 0, 0, - 676, 677, 5, 64, 0, 0, 677, 679, 5, 127, 0, 0, 678, 675, 1, 0, 0, 0, 678, - 679, 1, 0, 0, 0, 679, 682, 1, 0, 0, 0, 680, 681, 5, 147, 0, 0, 681, 683, - 3, 64, 32, 0, 682, 680, 1, 0, 0, 0, 682, 683, 1, 0, 0, 0, 683, 684, 1, - 0, 0, 0, 684, 693, 5, 38, 0, 0, 685, 690, 3, 108, 54, 0, 686, 690, 3, 74, - 37, 0, 687, 690, 3, 56, 28, 0, 688, 690, 3, 86, 43, 0, 689, 685, 1, 0, - 0, 0, 689, 686, 1, 0, 0, 0, 689, 687, 1, 0, 0, 0, 689, 688, 1, 0, 0, 0, - 690, 691, 1, 0, 0, 0, 691, 692, 5, 1, 0, 0, 692, 694, 1, 0, 0, 0, 693, - 689, 1, 0, 0, 0, 694, 695, 1, 0, 0, 0, 695, 693, 1, 0, 0, 0, 695, 696, - 1, 0, 0, 0, 696, 697, 1, 0, 0, 0, 697, 698, 5, 66, 0, 0, 698, 43, 1, 0, - 0, 0, 699, 701, 5, 50, 0, 0, 700, 702, 7, 2, 0, 0, 701, 700, 1, 0, 0, 0, - 701, 702, 1, 0, 0, 0, 702, 703, 1, 0, 0, 0, 703, 707, 5, 145, 0, 0, 704, - 705, 5, 80, 0, 0, 705, 706, 5, 102, 0, 0, 706, 708, 5, 70, 0, 0, 707, 704, - 1, 0, 0, 0, 707, 708, 1, 0, 0, 0, 708, 712, 1, 0, 0, 0, 709, 710, 3, 182, - 91, 0, 710, 711, 5, 2, 0, 0, 711, 713, 1, 0, 0, 0, 712, 709, 1, 0, 0, 0, - 712, 713, 1, 0, 0, 0, 713, 714, 1, 0, 0, 0, 714, 726, 3, 198, 99, 0, 715, - 716, 5, 3, 0, 0, 716, 721, 3, 188, 94, 0, 717, 718, 5, 5, 0, 0, 718, 720, - 3, 188, 94, 0, 719, 717, 1, 0, 0, 0, 720, 723, 1, 0, 0, 0, 721, 719, 1, - 0, 0, 0, 721, 722, 1, 0, 0, 0, 722, 724, 1, 0, 0, 0, 723, 721, 1, 0, 0, - 0, 724, 725, 5, 4, 0, 0, 725, 727, 1, 0, 0, 0, 726, 715, 1, 0, 0, 0, 726, - 727, 1, 0, 0, 0, 727, 728, 1, 0, 0, 0, 728, 729, 5, 33, 0, 0, 729, 730, - 3, 86, 43, 0, 730, 45, 1, 0, 0, 0, 731, 732, 5, 50, 0, 0, 732, 733, 5, - 146, 0, 0, 733, 
737, 5, 132, 0, 0, 734, 735, 5, 80, 0, 0, 735, 736, 5, - 102, 0, 0, 736, 738, 5, 70, 0, 0, 737, 734, 1, 0, 0, 0, 737, 738, 1, 0, - 0, 0, 738, 742, 1, 0, 0, 0, 739, 740, 3, 182, 91, 0, 740, 741, 5, 2, 0, - 0, 741, 743, 1, 0, 0, 0, 742, 739, 1, 0, 0, 0, 742, 743, 1, 0, 0, 0, 743, - 744, 1, 0, 0, 0, 744, 745, 3, 184, 92, 0, 745, 746, 5, 142, 0, 0, 746, - 758, 3, 200, 100, 0, 747, 748, 5, 3, 0, 0, 748, 753, 3, 172, 86, 0, 749, - 750, 5, 5, 0, 0, 750, 752, 3, 172, 86, 0, 751, 749, 1, 0, 0, 0, 752, 755, - 1, 0, 0, 0, 753, 751, 1, 0, 0, 0, 753, 754, 1, 0, 0, 0, 754, 756, 1, 0, - 0, 0, 755, 753, 1, 0, 0, 0, 756, 757, 5, 4, 0, 0, 757, 759, 1, 0, 0, 0, - 758, 747, 1, 0, 0, 0, 758, 759, 1, 0, 0, 0, 759, 47, 1, 0, 0, 0, 760, 762, - 5, 149, 0, 0, 761, 763, 5, 116, 0, 0, 762, 761, 1, 0, 0, 0, 762, 763, 1, - 0, 0, 0, 763, 764, 1, 0, 0, 0, 764, 765, 3, 50, 25, 0, 765, 766, 5, 33, - 0, 0, 766, 767, 5, 3, 0, 0, 767, 768, 3, 86, 43, 0, 768, 778, 5, 4, 0, - 0, 769, 770, 5, 5, 0, 0, 770, 771, 3, 50, 25, 0, 771, 772, 5, 33, 0, 0, - 772, 773, 5, 3, 0, 0, 773, 774, 3, 86, 43, 0, 774, 775, 5, 4, 0, 0, 775, - 777, 1, 0, 0, 0, 776, 769, 1, 0, 0, 0, 777, 780, 1, 0, 0, 0, 778, 776, - 1, 0, 0, 0, 778, 779, 1, 0, 0, 0, 779, 49, 1, 0, 0, 0, 780, 778, 1, 0, - 0, 0, 781, 793, 3, 184, 92, 0, 782, 783, 5, 3, 0, 0, 783, 788, 3, 188, - 94, 0, 784, 785, 5, 5, 0, 0, 785, 787, 3, 188, 94, 0, 786, 784, 1, 0, 0, - 0, 787, 790, 1, 0, 0, 0, 788, 786, 1, 0, 0, 0, 788, 789, 1, 0, 0, 0, 789, - 791, 1, 0, 0, 0, 790, 788, 1, 0, 0, 0, 791, 792, 5, 4, 0, 0, 792, 794, - 1, 0, 0, 0, 793, 782, 1, 0, 0, 0, 793, 794, 1, 0, 0, 0, 794, 51, 1, 0, - 0, 0, 795, 796, 3, 50, 25, 0, 796, 797, 5, 33, 0, 0, 797, 798, 5, 3, 0, - 0, 798, 799, 3, 164, 82, 0, 799, 801, 5, 139, 0, 0, 800, 802, 5, 29, 0, - 0, 801, 800, 1, 0, 0, 0, 801, 802, 1, 0, 0, 0, 802, 803, 1, 0, 0, 0, 803, - 804, 3, 166, 83, 0, 804, 805, 5, 4, 0, 0, 805, 53, 1, 0, 0, 0, 806, 818, - 3, 184, 92, 0, 807, 808, 5, 3, 0, 0, 808, 813, 3, 188, 94, 0, 809, 810, - 5, 5, 0, 0, 810, 812, 3, 188, 94, 0, 811, 809, 1, 0, 0, 0, 812, 815, 1, - 0, 0, 0, 813, 811, 1, 0, 0, 0, 813, 814, 1, 0, 0, 0, 814, 816, 1, 0, 0, - 0, 815, 813, 1, 0, 0, 0, 816, 817, 5, 4, 0, 0, 817, 819, 1, 0, 0, 0, 818, - 807, 1, 0, 0, 0, 818, 819, 1, 0, 0, 0, 819, 820, 1, 0, 0, 0, 820, 821, - 5, 33, 0, 0, 821, 822, 5, 3, 0, 0, 822, 823, 3, 86, 43, 0, 823, 824, 5, - 4, 0, 0, 824, 55, 1, 0, 0, 0, 825, 827, 3, 48, 24, 0, 826, 825, 1, 0, 0, - 0, 826, 827, 1, 0, 0, 0, 827, 828, 1, 0, 0, 0, 828, 829, 5, 59, 0, 0, 829, - 830, 5, 75, 0, 0, 830, 833, 3, 114, 57, 0, 831, 832, 5, 148, 0, 0, 832, - 834, 3, 64, 32, 0, 833, 831, 1, 0, 0, 0, 833, 834, 1, 0, 0, 0, 834, 836, - 1, 0, 0, 0, 835, 837, 3, 76, 38, 0, 836, 835, 1, 0, 0, 0, 836, 837, 1, - 0, 0, 0, 837, 57, 1, 0, 0, 0, 838, 840, 3, 48, 24, 0, 839, 838, 1, 0, 0, - 0, 839, 840, 1, 0, 0, 0, 840, 841, 1, 0, 0, 0, 841, 842, 5, 59, 0, 0, 842, - 843, 5, 75, 0, 0, 843, 846, 3, 114, 57, 0, 844, 845, 5, 148, 0, 0, 845, - 847, 3, 64, 32, 0, 846, 844, 1, 0, 0, 0, 846, 847, 1, 0, 0, 0, 847, 849, - 1, 0, 0, 0, 848, 850, 3, 76, 38, 0, 849, 848, 1, 0, 0, 0, 849, 850, 1, - 0, 0, 0, 850, 855, 1, 0, 0, 0, 851, 853, 3, 136, 68, 0, 852, 851, 1, 0, - 0, 0, 852, 853, 1, 0, 0, 0, 853, 854, 1, 0, 0, 0, 854, 856, 3, 138, 69, - 0, 855, 852, 1, 0, 0, 0, 855, 856, 1, 0, 0, 0, 856, 59, 1, 0, 0, 0, 857, - 859, 5, 61, 0, 0, 858, 860, 5, 55, 0, 0, 859, 858, 1, 0, 0, 0, 859, 860, - 1, 0, 0, 0, 860, 861, 1, 0, 0, 0, 861, 862, 3, 182, 91, 0, 862, 61, 1, - 0, 0, 0, 863, 864, 5, 63, 0, 0, 864, 867, 7, 9, 0, 0, 865, 866, 5, 80, - 
0, 0, 866, 868, 5, 70, 0, 0, 867, 865, 1, 0, 0, 0, 867, 868, 1, 0, 0, 0, - 868, 872, 1, 0, 0, 0, 869, 870, 3, 182, 91, 0, 870, 871, 5, 2, 0, 0, 871, - 873, 1, 0, 0, 0, 872, 869, 1, 0, 0, 0, 872, 873, 1, 0, 0, 0, 873, 874, - 1, 0, 0, 0, 874, 875, 3, 224, 112, 0, 875, 63, 1, 0, 0, 0, 876, 877, 6, - 32, -1, 0, 877, 965, 3, 68, 34, 0, 878, 965, 5, 187, 0, 0, 879, 880, 3, - 182, 91, 0, 880, 881, 5, 2, 0, 0, 881, 883, 1, 0, 0, 0, 882, 879, 1, 0, - 0, 0, 882, 883, 1, 0, 0, 0, 883, 884, 1, 0, 0, 0, 884, 885, 3, 184, 92, - 0, 885, 886, 5, 2, 0, 0, 886, 888, 1, 0, 0, 0, 887, 882, 1, 0, 0, 0, 887, - 888, 1, 0, 0, 0, 888, 889, 1, 0, 0, 0, 889, 965, 3, 188, 94, 0, 890, 891, - 3, 168, 84, 0, 891, 892, 3, 64, 32, 21, 892, 965, 1, 0, 0, 0, 893, 894, - 3, 180, 90, 0, 894, 907, 5, 3, 0, 0, 895, 897, 5, 62, 0, 0, 896, 895, 1, - 0, 0, 0, 896, 897, 1, 0, 0, 0, 897, 898, 1, 0, 0, 0, 898, 903, 3, 64, 32, - 0, 899, 900, 5, 5, 0, 0, 900, 902, 3, 64, 32, 0, 901, 899, 1, 0, 0, 0, - 902, 905, 1, 0, 0, 0, 903, 901, 1, 0, 0, 0, 903, 904, 1, 0, 0, 0, 904, - 908, 1, 0, 0, 0, 905, 903, 1, 0, 0, 0, 906, 908, 5, 7, 0, 0, 907, 896, - 1, 0, 0, 0, 907, 906, 1, 0, 0, 0, 907, 908, 1, 0, 0, 0, 908, 909, 1, 0, - 0, 0, 909, 911, 5, 4, 0, 0, 910, 912, 3, 118, 59, 0, 911, 910, 1, 0, 0, - 0, 911, 912, 1, 0, 0, 0, 912, 914, 1, 0, 0, 0, 913, 915, 3, 122, 61, 0, - 914, 913, 1, 0, 0, 0, 914, 915, 1, 0, 0, 0, 915, 965, 1, 0, 0, 0, 916, - 917, 5, 3, 0, 0, 917, 922, 3, 64, 32, 0, 918, 919, 5, 5, 0, 0, 919, 921, - 3, 64, 32, 0, 920, 918, 1, 0, 0, 0, 921, 924, 1, 0, 0, 0, 922, 920, 1, - 0, 0, 0, 922, 923, 1, 0, 0, 0, 923, 925, 1, 0, 0, 0, 924, 922, 1, 0, 0, - 0, 925, 926, 5, 4, 0, 0, 926, 965, 1, 0, 0, 0, 927, 928, 5, 43, 0, 0, 928, - 929, 5, 3, 0, 0, 929, 930, 3, 64, 32, 0, 930, 931, 5, 33, 0, 0, 931, 932, - 3, 30, 15, 0, 932, 933, 5, 4, 0, 0, 933, 965, 1, 0, 0, 0, 934, 936, 5, - 102, 0, 0, 935, 934, 1, 0, 0, 0, 935, 936, 1, 0, 0, 0, 936, 937, 1, 0, - 0, 0, 937, 939, 5, 70, 0, 0, 938, 935, 1, 0, 0, 0, 938, 939, 1, 0, 0, 0, - 939, 940, 1, 0, 0, 0, 940, 941, 5, 3, 0, 0, 941, 942, 3, 86, 43, 0, 942, - 943, 5, 4, 0, 0, 943, 965, 1, 0, 0, 0, 944, 946, 5, 42, 0, 0, 945, 947, - 3, 64, 32, 0, 946, 945, 1, 0, 0, 0, 946, 947, 1, 0, 0, 0, 947, 953, 1, - 0, 0, 0, 948, 949, 5, 147, 0, 0, 949, 950, 3, 64, 32, 0, 950, 951, 5, 135, - 0, 0, 951, 952, 3, 64, 32, 0, 952, 954, 1, 0, 0, 0, 953, 948, 1, 0, 0, - 0, 954, 955, 1, 0, 0, 0, 955, 953, 1, 0, 0, 0, 955, 956, 1, 0, 0, 0, 956, - 959, 1, 0, 0, 0, 957, 958, 5, 65, 0, 0, 958, 960, 3, 64, 32, 0, 959, 957, - 1, 0, 0, 0, 959, 960, 1, 0, 0, 0, 960, 961, 1, 0, 0, 0, 961, 962, 5, 66, - 0, 0, 962, 965, 1, 0, 0, 0, 963, 965, 3, 66, 33, 0, 964, 876, 1, 0, 0, - 0, 964, 878, 1, 0, 0, 0, 964, 887, 1, 0, 0, 0, 964, 890, 1, 0, 0, 0, 964, - 893, 1, 0, 0, 0, 964, 916, 1, 0, 0, 0, 964, 927, 1, 0, 0, 0, 964, 938, - 1, 0, 0, 0, 964, 944, 1, 0, 0, 0, 964, 963, 1, 0, 0, 0, 965, 1085, 1, 0, - 0, 0, 966, 967, 10, 20, 0, 0, 967, 968, 5, 11, 0, 0, 968, 1084, 3, 64, - 32, 21, 969, 970, 10, 19, 0, 0, 970, 971, 7, 10, 0, 0, 971, 1084, 3, 64, - 32, 20, 972, 973, 10, 18, 0, 0, 973, 974, 7, 4, 0, 0, 974, 1084, 3, 64, - 32, 19, 975, 976, 10, 17, 0, 0, 976, 977, 7, 11, 0, 0, 977, 1084, 3, 64, - 32, 18, 978, 979, 10, 16, 0, 0, 979, 980, 7, 12, 0, 0, 980, 1084, 3, 64, - 32, 17, 981, 994, 10, 15, 0, 0, 982, 995, 5, 6, 0, 0, 983, 995, 5, 22, - 0, 0, 984, 995, 5, 23, 0, 0, 985, 995, 5, 24, 0, 0, 986, 995, 5, 92, 0, - 0, 987, 988, 5, 92, 0, 0, 988, 995, 5, 102, 0, 0, 989, 995, 5, 83, 0, 0, - 990, 995, 5, 97, 0, 0, 991, 995, 5, 77, 0, 0, 992, 995, 
5, 99, 0, 0, 993, - 995, 5, 118, 0, 0, 994, 982, 1, 0, 0, 0, 994, 983, 1, 0, 0, 0, 994, 984, - 1, 0, 0, 0, 994, 985, 1, 0, 0, 0, 994, 986, 1, 0, 0, 0, 994, 987, 1, 0, - 0, 0, 994, 989, 1, 0, 0, 0, 994, 990, 1, 0, 0, 0, 994, 991, 1, 0, 0, 0, - 994, 992, 1, 0, 0, 0, 994, 993, 1, 0, 0, 0, 995, 996, 1, 0, 0, 0, 996, - 1084, 3, 64, 32, 16, 997, 998, 10, 14, 0, 0, 998, 999, 5, 32, 0, 0, 999, - 1084, 3, 64, 32, 15, 1000, 1001, 10, 13, 0, 0, 1001, 1002, 5, 108, 0, 0, - 1002, 1084, 3, 64, 32, 14, 1003, 1004, 10, 6, 0, 0, 1004, 1006, 5, 92, - 0, 0, 1005, 1007, 5, 102, 0, 0, 1006, 1005, 1, 0, 0, 0, 1006, 1007, 1, - 0, 0, 0, 1007, 1008, 1, 0, 0, 0, 1008, 1084, 3, 64, 32, 7, 1009, 1011, - 10, 5, 0, 0, 1010, 1012, 5, 102, 0, 0, 1011, 1010, 1, 0, 0, 0, 1011, 1012, - 1, 0, 0, 0, 1012, 1013, 1, 0, 0, 0, 1013, 1014, 5, 39, 0, 0, 1014, 1015, - 3, 64, 32, 0, 1015, 1016, 5, 32, 0, 0, 1016, 1017, 3, 64, 32, 6, 1017, - 1084, 1, 0, 0, 0, 1018, 1019, 10, 9, 0, 0, 1019, 1020, 5, 45, 0, 0, 1020, - 1084, 3, 190, 95, 0, 1021, 1023, 10, 8, 0, 0, 1022, 1024, 5, 102, 0, 0, - 1023, 1022, 1, 0, 0, 0, 1023, 1024, 1, 0, 0, 0, 1024, 1025, 1, 0, 0, 0, - 1025, 1026, 7, 13, 0, 0, 1026, 1029, 3, 64, 32, 0, 1027, 1028, 5, 67, 0, - 0, 1028, 1030, 3, 64, 32, 0, 1029, 1027, 1, 0, 0, 0, 1029, 1030, 1, 0, - 0, 0, 1030, 1084, 1, 0, 0, 0, 1031, 1036, 10, 7, 0, 0, 1032, 1037, 5, 93, - 0, 0, 1033, 1037, 5, 103, 0, 0, 1034, 1035, 5, 102, 0, 0, 1035, 1037, 5, - 104, 0, 0, 1036, 1032, 1, 0, 0, 0, 1036, 1033, 1, 0, 0, 0, 1036, 1034, - 1, 0, 0, 0, 1037, 1084, 1, 0, 0, 0, 1038, 1040, 10, 4, 0, 0, 1039, 1041, - 5, 102, 0, 0, 1040, 1039, 1, 0, 0, 0, 1040, 1041, 1, 0, 0, 0, 1041, 1042, - 1, 0, 0, 0, 1042, 1081, 5, 83, 0, 0, 1043, 1053, 5, 3, 0, 0, 1044, 1054, - 3, 86, 43, 0, 1045, 1050, 3, 64, 32, 0, 1046, 1047, 5, 5, 0, 0, 1047, 1049, - 3, 64, 32, 0, 1048, 1046, 1, 0, 0, 0, 1049, 1052, 1, 0, 0, 0, 1050, 1048, - 1, 0, 0, 0, 1050, 1051, 1, 0, 0, 0, 1051, 1054, 1, 0, 0, 0, 1052, 1050, - 1, 0, 0, 0, 1053, 1044, 1, 0, 0, 0, 1053, 1045, 1, 0, 0, 0, 1053, 1054, - 1, 0, 0, 0, 1054, 1055, 1, 0, 0, 0, 1055, 1082, 5, 4, 0, 0, 1056, 1057, - 3, 182, 91, 0, 1057, 1058, 5, 2, 0, 0, 1058, 1060, 1, 0, 0, 0, 1059, 1056, - 1, 0, 0, 0, 1059, 1060, 1, 0, 0, 0, 1060, 1061, 1, 0, 0, 0, 1061, 1082, - 3, 184, 92, 0, 1062, 1063, 3, 182, 91, 0, 1063, 1064, 5, 2, 0, 0, 1064, - 1066, 1, 0, 0, 0, 1065, 1062, 1, 0, 0, 0, 1065, 1066, 1, 0, 0, 0, 1066, - 1067, 1, 0, 0, 0, 1067, 1068, 3, 222, 111, 0, 1068, 1077, 5, 3, 0, 0, 1069, - 1074, 3, 64, 32, 0, 1070, 1071, 5, 5, 0, 0, 1071, 1073, 3, 64, 32, 0, 1072, - 1070, 1, 0, 0, 0, 1073, 1076, 1, 0, 0, 0, 1074, 1072, 1, 0, 0, 0, 1074, - 1075, 1, 0, 0, 0, 1075, 1078, 1, 0, 0, 0, 1076, 1074, 1, 0, 0, 0, 1077, - 1069, 1, 0, 0, 0, 1077, 1078, 1, 0, 0, 0, 1078, 1079, 1, 0, 0, 0, 1079, - 1080, 5, 4, 0, 0, 1080, 1082, 1, 0, 0, 0, 1081, 1043, 1, 0, 0, 0, 1081, - 1059, 1, 0, 0, 0, 1081, 1065, 1, 0, 0, 0, 1082, 1084, 1, 0, 0, 0, 1083, - 966, 1, 0, 0, 0, 1083, 969, 1, 0, 0, 0, 1083, 972, 1, 0, 0, 0, 1083, 975, - 1, 0, 0, 0, 1083, 978, 1, 0, 0, 0, 1083, 981, 1, 0, 0, 0, 1083, 997, 1, - 0, 0, 0, 1083, 1000, 1, 0, 0, 0, 1083, 1003, 1, 0, 0, 0, 1083, 1009, 1, - 0, 0, 0, 1083, 1018, 1, 0, 0, 0, 1083, 1021, 1, 0, 0, 0, 1083, 1031, 1, - 0, 0, 0, 1083, 1038, 1, 0, 0, 0, 1084, 1087, 1, 0, 0, 0, 1085, 1083, 1, - 0, 0, 0, 1085, 1086, 1, 0, 0, 0, 1086, 65, 1, 0, 0, 0, 1087, 1085, 1, 0, - 0, 0, 1088, 1089, 5, 115, 0, 0, 1089, 1094, 5, 3, 0, 0, 1090, 1095, 5, - 81, 0, 0, 1091, 1092, 7, 14, 0, 0, 1092, 1093, 5, 5, 0, 0, 1093, 1095, - 3, 170, 85, 0, 1094, 1090, 
1, 0, 0, 0, 1094, 1091, 1, 0, 0, 0, 1095, 1096, - 1, 0, 0, 0, 1096, 1097, 5, 4, 0, 0, 1097, 67, 1, 0, 0, 0, 1098, 1099, 7, - 15, 0, 0, 1099, 69, 1, 0, 0, 0, 1100, 1101, 5, 3, 0, 0, 1101, 1106, 3, - 64, 32, 0, 1102, 1103, 5, 5, 0, 0, 1103, 1105, 3, 64, 32, 0, 1104, 1102, - 1, 0, 0, 0, 1105, 1108, 1, 0, 0, 0, 1106, 1104, 1, 0, 0, 0, 1106, 1107, - 1, 0, 0, 0, 1107, 1109, 1, 0, 0, 0, 1108, 1106, 1, 0, 0, 0, 1109, 1110, - 5, 4, 0, 0, 1110, 71, 1, 0, 0, 0, 1111, 1112, 5, 144, 0, 0, 1112, 1117, - 3, 70, 35, 0, 1113, 1114, 5, 5, 0, 0, 1114, 1116, 3, 70, 35, 0, 1115, 1113, - 1, 0, 0, 0, 1116, 1119, 1, 0, 0, 0, 1117, 1115, 1, 0, 0, 0, 1117, 1118, - 1, 0, 0, 0, 1118, 73, 1, 0, 0, 0, 1119, 1117, 1, 0, 0, 0, 1120, 1122, 3, - 48, 24, 0, 1121, 1120, 1, 0, 0, 0, 1121, 1122, 1, 0, 0, 0, 1122, 1128, - 1, 0, 0, 0, 1123, 1129, 5, 88, 0, 0, 1124, 1129, 5, 122, 0, 0, 1125, 1126, - 5, 88, 0, 0, 1126, 1127, 5, 108, 0, 0, 1127, 1129, 7, 8, 0, 0, 1128, 1123, - 1, 0, 0, 0, 1128, 1124, 1, 0, 0, 0, 1128, 1125, 1, 0, 0, 0, 1129, 1130, - 1, 0, 0, 0, 1130, 1134, 5, 91, 0, 0, 1131, 1132, 3, 182, 91, 0, 1132, 1133, - 5, 2, 0, 0, 1133, 1135, 1, 0, 0, 0, 1134, 1131, 1, 0, 0, 0, 1134, 1135, - 1, 0, 0, 0, 1135, 1136, 1, 0, 0, 0, 1136, 1139, 3, 184, 92, 0, 1137, 1138, - 5, 33, 0, 0, 1138, 1140, 3, 206, 103, 0, 1139, 1137, 1, 0, 0, 0, 1139, - 1140, 1, 0, 0, 0, 1140, 1152, 1, 0, 0, 0, 1141, 1142, 5, 3, 0, 0, 1142, - 1147, 3, 188, 94, 0, 1143, 1144, 5, 5, 0, 0, 1144, 1146, 3, 188, 94, 0, - 1145, 1143, 1, 0, 0, 0, 1146, 1149, 1, 0, 0, 0, 1147, 1145, 1, 0, 0, 0, - 1147, 1148, 1, 0, 0, 0, 1148, 1150, 1, 0, 0, 0, 1149, 1147, 1, 0, 0, 0, - 1150, 1151, 5, 4, 0, 0, 1151, 1153, 1, 0, 0, 0, 1152, 1141, 1, 0, 0, 0, - 1152, 1153, 1, 0, 0, 0, 1153, 1163, 1, 0, 0, 0, 1154, 1157, 3, 72, 36, - 0, 1155, 1157, 3, 86, 43, 0, 1156, 1154, 1, 0, 0, 0, 1156, 1155, 1, 0, - 0, 0, 1157, 1159, 1, 0, 0, 0, 1158, 1160, 3, 78, 39, 0, 1159, 1158, 1, - 0, 0, 0, 1159, 1160, 1, 0, 0, 0, 1160, 1164, 1, 0, 0, 0, 1161, 1162, 5, - 56, 0, 0, 1162, 1164, 5, 144, 0, 0, 1163, 1156, 1, 0, 0, 0, 1163, 1161, - 1, 0, 0, 0, 1164, 1166, 1, 0, 0, 0, 1165, 1167, 3, 76, 38, 0, 1166, 1165, - 1, 0, 0, 0, 1166, 1167, 1, 0, 0, 0, 1167, 75, 1, 0, 0, 0, 1168, 1169, 5, - 124, 0, 0, 1169, 1174, 3, 100, 50, 0, 1170, 1171, 5, 5, 0, 0, 1171, 1173, - 3, 100, 50, 0, 1172, 1170, 1, 0, 0, 0, 1173, 1176, 1, 0, 0, 0, 1174, 1172, - 1, 0, 0, 0, 1174, 1175, 1, 0, 0, 0, 1175, 77, 1, 0, 0, 0, 1176, 1174, 1, - 0, 0, 0, 1177, 1178, 5, 107, 0, 0, 1178, 1193, 5, 48, 0, 0, 1179, 1180, - 5, 3, 0, 0, 1180, 1185, 3, 24, 12, 0, 1181, 1182, 5, 5, 0, 0, 1182, 1184, - 3, 24, 12, 0, 1183, 1181, 1, 0, 0, 0, 1184, 1187, 1, 0, 0, 0, 1185, 1183, - 1, 0, 0, 0, 1185, 1186, 1, 0, 0, 0, 1186, 1188, 1, 0, 0, 0, 1187, 1185, - 1, 0, 0, 0, 1188, 1191, 5, 4, 0, 0, 1189, 1190, 5, 148, 0, 0, 1190, 1192, - 3, 64, 32, 0, 1191, 1189, 1, 0, 0, 0, 1191, 1192, 1, 0, 0, 0, 1192, 1194, - 1, 0, 0, 0, 1193, 1179, 1, 0, 0, 0, 1193, 1194, 1, 0, 0, 0, 1194, 1195, - 1, 0, 0, 0, 1195, 1222, 5, 183, 0, 0, 1196, 1223, 5, 184, 0, 0, 1197, 1198, - 5, 141, 0, 0, 1198, 1201, 5, 131, 0, 0, 1199, 1202, 3, 188, 94, 0, 1200, - 1202, 3, 110, 55, 0, 1201, 1199, 1, 0, 0, 0, 1201, 1200, 1, 0, 0, 0, 1202, - 1203, 1, 0, 0, 0, 1203, 1204, 5, 6, 0, 0, 1204, 1215, 3, 64, 32, 0, 1205, - 1208, 5, 5, 0, 0, 1206, 1209, 3, 188, 94, 0, 1207, 1209, 3, 110, 55, 0, - 1208, 1206, 1, 0, 0, 0, 1208, 1207, 1, 0, 0, 0, 1209, 1210, 1, 0, 0, 0, - 1210, 1211, 5, 6, 0, 0, 1211, 1212, 3, 64, 32, 0, 1212, 1214, 1, 0, 0, - 0, 1213, 1205, 1, 0, 0, 0, 1214, 1217, 1, 0, 0, 0, 1215, 
1213, 1, 0, 0, - 0, 1215, 1216, 1, 0, 0, 0, 1216, 1220, 1, 0, 0, 0, 1217, 1215, 1, 0, 0, - 0, 1218, 1219, 5, 148, 0, 0, 1219, 1221, 3, 64, 32, 0, 1220, 1218, 1, 0, - 0, 0, 1220, 1221, 1, 0, 0, 0, 1221, 1223, 1, 0, 0, 0, 1222, 1196, 1, 0, - 0, 0, 1222, 1197, 1, 0, 0, 0, 1223, 79, 1, 0, 0, 0, 1224, 1228, 5, 112, - 0, 0, 1225, 1226, 3, 182, 91, 0, 1226, 1227, 5, 2, 0, 0, 1227, 1229, 1, - 0, 0, 0, 1228, 1225, 1, 0, 0, 0, 1228, 1229, 1, 0, 0, 0, 1229, 1230, 1, - 0, 0, 0, 1230, 1237, 3, 202, 101, 0, 1231, 1232, 5, 6, 0, 0, 1232, 1238, - 3, 82, 41, 0, 1233, 1234, 5, 3, 0, 0, 1234, 1235, 3, 82, 41, 0, 1235, 1236, - 5, 4, 0, 0, 1236, 1238, 1, 0, 0, 0, 1237, 1231, 1, 0, 0, 0, 1237, 1233, - 1, 0, 0, 0, 1237, 1238, 1, 0, 0, 0, 1238, 81, 1, 0, 0, 0, 1239, 1243, 3, - 34, 17, 0, 1240, 1243, 3, 178, 89, 0, 1241, 1243, 5, 188, 0, 0, 1242, 1239, - 1, 0, 0, 0, 1242, 1240, 1, 0, 0, 0, 1242, 1241, 1, 0, 0, 0, 1243, 83, 1, - 0, 0, 0, 1244, 1255, 5, 119, 0, 0, 1245, 1256, 3, 190, 95, 0, 1246, 1247, - 3, 182, 91, 0, 1247, 1248, 5, 2, 0, 0, 1248, 1250, 1, 0, 0, 0, 1249, 1246, - 1, 0, 0, 0, 1249, 1250, 1, 0, 0, 0, 1250, 1253, 1, 0, 0, 0, 1251, 1254, - 3, 184, 92, 0, 1252, 1254, 3, 194, 97, 0, 1253, 1251, 1, 0, 0, 0, 1253, - 1252, 1, 0, 0, 0, 1254, 1256, 1, 0, 0, 0, 1255, 1245, 1, 0, 0, 0, 1255, - 1249, 1, 0, 0, 0, 1255, 1256, 1, 0, 0, 0, 1256, 85, 1, 0, 0, 0, 1257, 1259, - 3, 134, 67, 0, 1258, 1257, 1, 0, 0, 0, 1258, 1259, 1, 0, 0, 0, 1259, 1260, - 1, 0, 0, 0, 1260, 1266, 3, 90, 45, 0, 1261, 1262, 3, 106, 53, 0, 1262, - 1263, 3, 90, 45, 0, 1263, 1265, 1, 0, 0, 0, 1264, 1261, 1, 0, 0, 0, 1265, - 1268, 1, 0, 0, 0, 1266, 1264, 1, 0, 0, 0, 1266, 1267, 1, 0, 0, 0, 1267, - 1270, 1, 0, 0, 0, 1268, 1266, 1, 0, 0, 0, 1269, 1271, 3, 136, 68, 0, 1270, - 1269, 1, 0, 0, 0, 1270, 1271, 1, 0, 0, 0, 1271, 1273, 1, 0, 0, 0, 1272, - 1274, 3, 138, 69, 0, 1273, 1272, 1, 0, 0, 0, 1273, 1274, 1, 0, 0, 0, 1274, - 87, 1, 0, 0, 0, 1275, 1283, 3, 98, 49, 0, 1276, 1277, 3, 102, 51, 0, 1277, - 1279, 3, 98, 49, 0, 1278, 1280, 3, 104, 52, 0, 1279, 1278, 1, 0, 0, 0, - 1279, 1280, 1, 0, 0, 0, 1280, 1282, 1, 0, 0, 0, 1281, 1276, 1, 0, 0, 0, - 1282, 1285, 1, 0, 0, 0, 1283, 1281, 1, 0, 0, 0, 1283, 1284, 1, 0, 0, 0, - 1284, 89, 1, 0, 0, 0, 1285, 1283, 1, 0, 0, 0, 1286, 1288, 5, 130, 0, 0, - 1287, 1289, 7, 16, 0, 0, 1288, 1287, 1, 0, 0, 0, 1288, 1289, 1, 0, 0, 0, - 1289, 1290, 1, 0, 0, 0, 1290, 1295, 3, 100, 50, 0, 1291, 1292, 5, 5, 0, - 0, 1292, 1294, 3, 100, 50, 0, 1293, 1291, 1, 0, 0, 0, 1294, 1297, 1, 0, - 0, 0, 1295, 1293, 1, 0, 0, 0, 1295, 1296, 1, 0, 0, 0, 1296, 1310, 1, 0, - 0, 0, 1297, 1295, 1, 0, 0, 0, 1298, 1308, 5, 75, 0, 0, 1299, 1304, 3, 98, - 49, 0, 1300, 1301, 5, 5, 0, 0, 1301, 1303, 3, 98, 49, 0, 1302, 1300, 1, - 0, 0, 0, 1303, 1306, 1, 0, 0, 0, 1304, 1302, 1, 0, 0, 0, 1304, 1305, 1, - 0, 0, 0, 1305, 1309, 1, 0, 0, 0, 1306, 1304, 1, 0, 0, 0, 1307, 1309, 3, - 88, 44, 0, 1308, 1299, 1, 0, 0, 0, 1308, 1307, 1, 0, 0, 0, 1309, 1311, - 1, 0, 0, 0, 1310, 1298, 1, 0, 0, 0, 1310, 1311, 1, 0, 0, 0, 1311, 1314, - 1, 0, 0, 0, 1312, 1313, 5, 148, 0, 0, 1313, 1315, 3, 64, 32, 0, 1314, 1312, - 1, 0, 0, 0, 1314, 1315, 1, 0, 0, 0, 1315, 1330, 1, 0, 0, 0, 1316, 1317, - 5, 78, 0, 0, 1317, 1318, 5, 40, 0, 0, 1318, 1323, 3, 64, 32, 0, 1319, 1320, - 5, 5, 0, 0, 1320, 1322, 3, 64, 32, 0, 1321, 1319, 1, 0, 0, 0, 1322, 1325, - 1, 0, 0, 0, 1323, 1321, 1, 0, 0, 0, 1323, 1324, 1, 0, 0, 0, 1324, 1328, - 1, 0, 0, 0, 1325, 1323, 1, 0, 0, 0, 1326, 1327, 5, 79, 0, 0, 1327, 1329, - 3, 64, 32, 0, 1328, 1326, 1, 0, 0, 0, 1328, 1329, 1, 0, 0, 0, 1329, 1331, - 1, 0, 0, 0, 
1330, 1316, 1, 0, 0, 0, 1330, 1331, 1, 0, 0, 0, 1331, 1346, - 1, 0, 0, 0, 1332, 1333, 5, 174, 0, 0, 1333, 1334, 3, 210, 105, 0, 1334, - 1335, 5, 33, 0, 0, 1335, 1343, 3, 120, 60, 0, 1336, 1337, 5, 5, 0, 0, 1337, - 1338, 3, 210, 105, 0, 1338, 1339, 5, 33, 0, 0, 1339, 1340, 3, 120, 60, - 0, 1340, 1342, 1, 0, 0, 0, 1341, 1336, 1, 0, 0, 0, 1342, 1345, 1, 0, 0, - 0, 1343, 1341, 1, 0, 0, 0, 1343, 1344, 1, 0, 0, 0, 1344, 1347, 1, 0, 0, - 0, 1345, 1343, 1, 0, 0, 0, 1346, 1332, 1, 0, 0, 0, 1346, 1347, 1, 0, 0, - 0, 1347, 1350, 1, 0, 0, 0, 1348, 1350, 3, 72, 36, 0, 1349, 1286, 1, 0, - 0, 0, 1349, 1348, 1, 0, 0, 0, 1350, 91, 1, 0, 0, 0, 1351, 1352, 3, 86, - 43, 0, 1352, 93, 1, 0, 0, 0, 1353, 1355, 3, 134, 67, 0, 1354, 1353, 1, - 0, 0, 0, 1354, 1355, 1, 0, 0, 0, 1355, 1356, 1, 0, 0, 0, 1356, 1358, 3, - 90, 45, 0, 1357, 1359, 3, 136, 68, 0, 1358, 1357, 1, 0, 0, 0, 1358, 1359, - 1, 0, 0, 0, 1359, 1361, 1, 0, 0, 0, 1360, 1362, 3, 138, 69, 0, 1361, 1360, - 1, 0, 0, 0, 1361, 1362, 1, 0, 0, 0, 1362, 95, 1, 0, 0, 0, 1363, 1365, 3, - 134, 67, 0, 1364, 1363, 1, 0, 0, 0, 1364, 1365, 1, 0, 0, 0, 1365, 1366, - 1, 0, 0, 0, 1366, 1376, 3, 90, 45, 0, 1367, 1369, 5, 139, 0, 0, 1368, 1370, - 5, 29, 0, 0, 1369, 1368, 1, 0, 0, 0, 1369, 1370, 1, 0, 0, 0, 1370, 1374, - 1, 0, 0, 0, 1371, 1374, 5, 90, 0, 0, 1372, 1374, 5, 68, 0, 0, 1373, 1367, - 1, 0, 0, 0, 1373, 1371, 1, 0, 0, 0, 1373, 1372, 1, 0, 0, 0, 1374, 1375, - 1, 0, 0, 0, 1375, 1377, 3, 90, 45, 0, 1376, 1373, 1, 0, 0, 0, 1377, 1378, - 1, 0, 0, 0, 1378, 1376, 1, 0, 0, 0, 1378, 1379, 1, 0, 0, 0, 1379, 1381, - 1, 0, 0, 0, 1380, 1382, 3, 136, 68, 0, 1381, 1380, 1, 0, 0, 0, 1381, 1382, - 1, 0, 0, 0, 1382, 1384, 1, 0, 0, 0, 1383, 1385, 3, 138, 69, 0, 1384, 1383, - 1, 0, 0, 0, 1384, 1385, 1, 0, 0, 0, 1385, 97, 1, 0, 0, 0, 1386, 1387, 3, - 182, 91, 0, 1387, 1388, 5, 2, 0, 0, 1388, 1390, 1, 0, 0, 0, 1389, 1386, - 1, 0, 0, 0, 1389, 1390, 1, 0, 0, 0, 1390, 1391, 1, 0, 0, 0, 1391, 1396, - 3, 184, 92, 0, 1392, 1394, 5, 33, 0, 0, 1393, 1392, 1, 0, 0, 0, 1393, 1394, - 1, 0, 0, 0, 1394, 1395, 1, 0, 0, 0, 1395, 1397, 3, 206, 103, 0, 1396, 1393, - 1, 0, 0, 0, 1396, 1397, 1, 0, 0, 0, 1397, 1403, 1, 0, 0, 0, 1398, 1399, - 5, 85, 0, 0, 1399, 1400, 5, 40, 0, 0, 1400, 1404, 3, 194, 97, 0, 1401, - 1402, 5, 102, 0, 0, 1402, 1404, 5, 85, 0, 0, 1403, 1398, 1, 0, 0, 0, 1403, - 1401, 1, 0, 0, 0, 1403, 1404, 1, 0, 0, 0, 1404, 1451, 1, 0, 0, 0, 1405, - 1406, 3, 182, 91, 0, 1406, 1407, 5, 2, 0, 0, 1407, 1409, 1, 0, 0, 0, 1408, - 1405, 1, 0, 0, 0, 1408, 1409, 1, 0, 0, 0, 1409, 1410, 1, 0, 0, 0, 1410, - 1411, 3, 222, 111, 0, 1411, 1412, 5, 3, 0, 0, 1412, 1417, 3, 64, 32, 0, - 1413, 1414, 5, 5, 0, 0, 1414, 1416, 3, 64, 32, 0, 1415, 1413, 1, 0, 0, - 0, 1416, 1419, 1, 0, 0, 0, 1417, 1415, 1, 0, 0, 0, 1417, 1418, 1, 0, 0, - 0, 1418, 1420, 1, 0, 0, 0, 1419, 1417, 1, 0, 0, 0, 1420, 1425, 5, 4, 0, - 0, 1421, 1423, 5, 33, 0, 0, 1422, 1421, 1, 0, 0, 0, 1422, 1423, 1, 0, 0, - 0, 1423, 1424, 1, 0, 0, 0, 1424, 1426, 3, 206, 103, 0, 1425, 1422, 1, 0, - 0, 0, 1425, 1426, 1, 0, 0, 0, 1426, 1451, 1, 0, 0, 0, 1427, 1437, 5, 3, - 0, 0, 1428, 1433, 3, 98, 49, 0, 1429, 1430, 5, 5, 0, 0, 1430, 1432, 3, - 98, 49, 0, 1431, 1429, 1, 0, 0, 0, 1432, 1435, 1, 0, 0, 0, 1433, 1431, - 1, 0, 0, 0, 1433, 1434, 1, 0, 0, 0, 1434, 1438, 1, 0, 0, 0, 1435, 1433, - 1, 0, 0, 0, 1436, 1438, 3, 88, 44, 0, 1437, 1428, 1, 0, 0, 0, 1437, 1436, - 1, 0, 0, 0, 1438, 1439, 1, 0, 0, 0, 1439, 1440, 5, 4, 0, 0, 1440, 1451, - 1, 0, 0, 0, 1441, 1442, 5, 3, 0, 0, 1442, 1443, 3, 86, 43, 0, 1443, 1448, - 5, 4, 0, 0, 1444, 1446, 5, 33, 0, 0, 1445, 1444, 
1, 0, 0, 0, 1445, 1446, - 1, 0, 0, 0, 1446, 1447, 1, 0, 0, 0, 1447, 1449, 3, 206, 103, 0, 1448, 1445, - 1, 0, 0, 0, 1448, 1449, 1, 0, 0, 0, 1449, 1451, 1, 0, 0, 0, 1450, 1389, - 1, 0, 0, 0, 1450, 1408, 1, 0, 0, 0, 1450, 1427, 1, 0, 0, 0, 1450, 1441, - 1, 0, 0, 0, 1451, 99, 1, 0, 0, 0, 1452, 1465, 5, 7, 0, 0, 1453, 1454, 3, - 184, 92, 0, 1454, 1455, 5, 2, 0, 0, 1455, 1456, 5, 7, 0, 0, 1456, 1465, - 1, 0, 0, 0, 1457, 1462, 3, 64, 32, 0, 1458, 1460, 5, 33, 0, 0, 1459, 1458, - 1, 0, 0, 0, 1459, 1460, 1, 0, 0, 0, 1460, 1461, 1, 0, 0, 0, 1461, 1463, - 3, 174, 87, 0, 1462, 1459, 1, 0, 0, 0, 1462, 1463, 1, 0, 0, 0, 1463, 1465, - 1, 0, 0, 0, 1464, 1452, 1, 0, 0, 0, 1464, 1453, 1, 0, 0, 0, 1464, 1457, - 1, 0, 0, 0, 1465, 101, 1, 0, 0, 0, 1466, 1480, 5, 5, 0, 0, 1467, 1469, - 5, 100, 0, 0, 1468, 1467, 1, 0, 0, 0, 1468, 1469, 1, 0, 0, 0, 1469, 1476, - 1, 0, 0, 0, 1470, 1472, 5, 96, 0, 0, 1471, 1473, 5, 110, 0, 0, 1472, 1471, - 1, 0, 0, 0, 1472, 1473, 1, 0, 0, 0, 1473, 1477, 1, 0, 0, 0, 1474, 1477, - 5, 87, 0, 0, 1475, 1477, 5, 51, 0, 0, 1476, 1470, 1, 0, 0, 0, 1476, 1474, - 1, 0, 0, 0, 1476, 1475, 1, 0, 0, 0, 1476, 1477, 1, 0, 0, 0, 1477, 1478, - 1, 0, 0, 0, 1478, 1480, 5, 94, 0, 0, 1479, 1466, 1, 0, 0, 0, 1479, 1468, - 1, 0, 0, 0, 1480, 103, 1, 0, 0, 0, 1481, 1482, 5, 107, 0, 0, 1482, 1496, - 3, 64, 32, 0, 1483, 1484, 5, 142, 0, 0, 1484, 1485, 5, 3, 0, 0, 1485, 1490, - 3, 188, 94, 0, 1486, 1487, 5, 5, 0, 0, 1487, 1489, 3, 188, 94, 0, 1488, - 1486, 1, 0, 0, 0, 1489, 1492, 1, 0, 0, 0, 1490, 1488, 1, 0, 0, 0, 1490, - 1491, 1, 0, 0, 0, 1491, 1493, 1, 0, 0, 0, 1492, 1490, 1, 0, 0, 0, 1493, - 1494, 5, 4, 0, 0, 1494, 1496, 1, 0, 0, 0, 1495, 1481, 1, 0, 0, 0, 1495, - 1483, 1, 0, 0, 0, 1496, 105, 1, 0, 0, 0, 1497, 1499, 5, 139, 0, 0, 1498, - 1500, 5, 29, 0, 0, 1499, 1498, 1, 0, 0, 0, 1499, 1500, 1, 0, 0, 0, 1500, - 1504, 1, 0, 0, 0, 1501, 1504, 5, 90, 0, 0, 1502, 1504, 5, 68, 0, 0, 1503, - 1497, 1, 0, 0, 0, 1503, 1501, 1, 0, 0, 0, 1503, 1502, 1, 0, 0, 0, 1504, - 107, 1, 0, 0, 0, 1505, 1507, 3, 48, 24, 0, 1506, 1505, 1, 0, 0, 0, 1506, - 1507, 1, 0, 0, 0, 1507, 1508, 1, 0, 0, 0, 1508, 1511, 5, 141, 0, 0, 1509, - 1510, 5, 108, 0, 0, 1510, 1512, 7, 8, 0, 0, 1511, 1509, 1, 0, 0, 0, 1511, - 1512, 1, 0, 0, 0, 1512, 1513, 1, 0, 0, 0, 1513, 1514, 3, 114, 57, 0, 1514, - 1517, 5, 131, 0, 0, 1515, 1518, 3, 188, 94, 0, 1516, 1518, 3, 110, 55, - 0, 1517, 1515, 1, 0, 0, 0, 1517, 1516, 1, 0, 0, 0, 1518, 1519, 1, 0, 0, - 0, 1519, 1520, 5, 6, 0, 0, 1520, 1531, 3, 64, 32, 0, 1521, 1524, 5, 5, - 0, 0, 1522, 1525, 3, 188, 94, 0, 1523, 1525, 3, 110, 55, 0, 1524, 1522, - 1, 0, 0, 0, 1524, 1523, 1, 0, 0, 0, 1525, 1526, 1, 0, 0, 0, 1526, 1527, - 5, 6, 0, 0, 1527, 1528, 3, 64, 32, 0, 1528, 1530, 1, 0, 0, 0, 1529, 1521, - 1, 0, 0, 0, 1530, 1533, 1, 0, 0, 0, 1531, 1529, 1, 0, 0, 0, 1531, 1532, - 1, 0, 0, 0, 1532, 1546, 1, 0, 0, 0, 1533, 1531, 1, 0, 0, 0, 1534, 1544, - 5, 75, 0, 0, 1535, 1540, 3, 98, 49, 0, 1536, 1537, 5, 5, 0, 0, 1537, 1539, - 3, 98, 49, 0, 1538, 1536, 1, 0, 0, 0, 1539, 1542, 1, 0, 0, 0, 1540, 1538, - 1, 0, 0, 0, 1540, 1541, 1, 0, 0, 0, 1541, 1545, 1, 0, 0, 0, 1542, 1540, - 1, 0, 0, 0, 1543, 1545, 3, 88, 44, 0, 1544, 1535, 1, 0, 0, 0, 1544, 1543, - 1, 0, 0, 0, 1545, 1547, 1, 0, 0, 0, 1546, 1534, 1, 0, 0, 0, 1546, 1547, - 1, 0, 0, 0, 1547, 1550, 1, 0, 0, 0, 1548, 1549, 5, 148, 0, 0, 1549, 1551, - 3, 64, 32, 0, 1550, 1548, 1, 0, 0, 0, 1550, 1551, 1, 0, 0, 0, 1551, 1553, - 1, 0, 0, 0, 1552, 1554, 3, 76, 38, 0, 1553, 1552, 1, 0, 0, 0, 1553, 1554, - 1, 0, 0, 0, 1554, 109, 1, 0, 0, 0, 1555, 1556, 5, 3, 0, 0, 1556, 1561, - 3, 
188, 94, 0, 1557, 1558, 5, 5, 0, 0, 1558, 1560, 3, 188, 94, 0, 1559, - 1557, 1, 0, 0, 0, 1560, 1563, 1, 0, 0, 0, 1561, 1559, 1, 0, 0, 0, 1561, - 1562, 1, 0, 0, 0, 1562, 1564, 1, 0, 0, 0, 1563, 1561, 1, 0, 0, 0, 1564, - 1565, 5, 4, 0, 0, 1565, 111, 1, 0, 0, 0, 1566, 1568, 3, 48, 24, 0, 1567, - 1566, 1, 0, 0, 0, 1567, 1568, 1, 0, 0, 0, 1568, 1569, 1, 0, 0, 0, 1569, - 1572, 5, 141, 0, 0, 1570, 1571, 5, 108, 0, 0, 1571, 1573, 7, 8, 0, 0, 1572, - 1570, 1, 0, 0, 0, 1572, 1573, 1, 0, 0, 0, 1573, 1574, 1, 0, 0, 0, 1574, - 1575, 3, 114, 57, 0, 1575, 1578, 5, 131, 0, 0, 1576, 1579, 3, 188, 94, - 0, 1577, 1579, 3, 110, 55, 0, 1578, 1576, 1, 0, 0, 0, 1578, 1577, 1, 0, - 0, 0, 1579, 1580, 1, 0, 0, 0, 1580, 1581, 5, 6, 0, 0, 1581, 1592, 3, 64, - 32, 0, 1582, 1585, 5, 5, 0, 0, 1583, 1586, 3, 188, 94, 0, 1584, 1586, 3, - 110, 55, 0, 1585, 1583, 1, 0, 0, 0, 1585, 1584, 1, 0, 0, 0, 1586, 1587, - 1, 0, 0, 0, 1587, 1588, 5, 6, 0, 0, 1588, 1589, 3, 64, 32, 0, 1589, 1591, - 1, 0, 0, 0, 1590, 1582, 1, 0, 0, 0, 1591, 1594, 1, 0, 0, 0, 1592, 1590, - 1, 0, 0, 0, 1592, 1593, 1, 0, 0, 0, 1593, 1597, 1, 0, 0, 0, 1594, 1592, - 1, 0, 0, 0, 1595, 1596, 5, 148, 0, 0, 1596, 1598, 3, 64, 32, 0, 1597, 1595, - 1, 0, 0, 0, 1597, 1598, 1, 0, 0, 0, 1598, 1600, 1, 0, 0, 0, 1599, 1601, - 3, 76, 38, 0, 1600, 1599, 1, 0, 0, 0, 1600, 1601, 1, 0, 0, 0, 1601, 1606, - 1, 0, 0, 0, 1602, 1604, 3, 136, 68, 0, 1603, 1602, 1, 0, 0, 0, 1603, 1604, - 1, 0, 0, 0, 1604, 1605, 1, 0, 0, 0, 1605, 1607, 3, 138, 69, 0, 1606, 1603, - 1, 0, 0, 0, 1606, 1607, 1, 0, 0, 0, 1607, 113, 1, 0, 0, 0, 1608, 1609, - 3, 182, 91, 0, 1609, 1610, 5, 2, 0, 0, 1610, 1612, 1, 0, 0, 0, 1611, 1608, - 1, 0, 0, 0, 1611, 1612, 1, 0, 0, 0, 1612, 1613, 1, 0, 0, 0, 1613, 1616, - 3, 184, 92, 0, 1614, 1615, 5, 33, 0, 0, 1615, 1617, 3, 212, 106, 0, 1616, - 1614, 1, 0, 0, 0, 1616, 1617, 1, 0, 0, 0, 1617, 1623, 1, 0, 0, 0, 1618, - 1619, 5, 85, 0, 0, 1619, 1620, 5, 40, 0, 0, 1620, 1624, 3, 194, 97, 0, - 1621, 1622, 5, 102, 0, 0, 1622, 1624, 5, 85, 0, 0, 1623, 1618, 1, 0, 0, - 0, 1623, 1621, 1, 0, 0, 0, 1623, 1624, 1, 0, 0, 0, 1624, 115, 1, 0, 0, - 0, 1625, 1627, 5, 143, 0, 0, 1626, 1628, 3, 182, 91, 0, 1627, 1626, 1, - 0, 0, 0, 1627, 1628, 1, 0, 0, 0, 1628, 1631, 1, 0, 0, 0, 1629, 1630, 5, - 91, 0, 0, 1630, 1632, 3, 214, 107, 0, 1631, 1629, 1, 0, 0, 0, 1631, 1632, - 1, 0, 0, 0, 1632, 117, 1, 0, 0, 0, 1633, 1634, 5, 178, 0, 0, 1634, 1635, - 5, 3, 0, 0, 1635, 1636, 5, 148, 0, 0, 1636, 1637, 3, 64, 32, 0, 1637, 1638, - 5, 4, 0, 0, 1638, 119, 1, 0, 0, 0, 1639, 1641, 5, 3, 0, 0, 1640, 1642, - 3, 216, 108, 0, 1641, 1640, 1, 0, 0, 0, 1641, 1642, 1, 0, 0, 0, 1642, 1653, - 1, 0, 0, 0, 1643, 1644, 5, 153, 0, 0, 1644, 1645, 5, 40, 0, 0, 1645, 1650, - 3, 64, 32, 0, 1646, 1647, 5, 5, 0, 0, 1647, 1649, 3, 64, 32, 0, 1648, 1646, - 1, 0, 0, 0, 1649, 1652, 1, 0, 0, 0, 1650, 1648, 1, 0, 0, 0, 1650, 1651, - 1, 0, 0, 0, 1651, 1654, 1, 0, 0, 0, 1652, 1650, 1, 0, 0, 0, 1653, 1643, - 1, 0, 0, 0, 1653, 1654, 1, 0, 0, 0, 1654, 1655, 1, 0, 0, 0, 1655, 1656, - 5, 109, 0, 0, 1656, 1657, 5, 40, 0, 0, 1657, 1662, 3, 140, 70, 0, 1658, - 1659, 5, 5, 0, 0, 1659, 1661, 3, 140, 70, 0, 1660, 1658, 1, 0, 0, 0, 1661, - 1664, 1, 0, 0, 0, 1662, 1660, 1, 0, 0, 0, 1662, 1663, 1, 0, 0, 0, 1663, - 1666, 1, 0, 0, 0, 1664, 1662, 1, 0, 0, 0, 1665, 1667, 3, 124, 62, 0, 1666, - 1665, 1, 0, 0, 0, 1666, 1667, 1, 0, 0, 0, 1667, 1668, 1, 0, 0, 0, 1668, - 1669, 5, 4, 0, 0, 1669, 121, 1, 0, 0, 0, 1670, 1704, 5, 152, 0, 0, 1671, - 1705, 3, 210, 105, 0, 1672, 1674, 5, 3, 0, 0, 1673, 1675, 3, 216, 108, - 0, 1674, 1673, 1, 0, 0, 0, 1674, 
1675, 1, 0, 0, 0, 1675, 1686, 1, 0, 0, - 0, 1676, 1677, 5, 153, 0, 0, 1677, 1678, 5, 40, 0, 0, 1678, 1683, 3, 64, - 32, 0, 1679, 1680, 5, 5, 0, 0, 1680, 1682, 3, 64, 32, 0, 1681, 1679, 1, - 0, 0, 0, 1682, 1685, 1, 0, 0, 0, 1683, 1681, 1, 0, 0, 0, 1683, 1684, 1, - 0, 0, 0, 1684, 1687, 1, 0, 0, 0, 1685, 1683, 1, 0, 0, 0, 1686, 1676, 1, - 0, 0, 0, 1686, 1687, 1, 0, 0, 0, 1687, 1698, 1, 0, 0, 0, 1688, 1689, 5, - 109, 0, 0, 1689, 1690, 5, 40, 0, 0, 1690, 1695, 3, 140, 70, 0, 1691, 1692, - 5, 5, 0, 0, 1692, 1694, 3, 140, 70, 0, 1693, 1691, 1, 0, 0, 0, 1694, 1697, - 1, 0, 0, 0, 1695, 1693, 1, 0, 0, 0, 1695, 1696, 1, 0, 0, 0, 1696, 1699, - 1, 0, 0, 0, 1697, 1695, 1, 0, 0, 0, 1698, 1688, 1, 0, 0, 0, 1698, 1699, - 1, 0, 0, 0, 1699, 1701, 1, 0, 0, 0, 1700, 1702, 3, 124, 62, 0, 1701, 1700, - 1, 0, 0, 0, 1701, 1702, 1, 0, 0, 0, 1702, 1703, 1, 0, 0, 0, 1703, 1705, - 5, 4, 0, 0, 1704, 1671, 1, 0, 0, 0, 1704, 1672, 1, 0, 0, 0, 1705, 123, - 1, 0, 0, 0, 1706, 1716, 3, 126, 63, 0, 1707, 1714, 5, 180, 0, 0, 1708, - 1709, 5, 101, 0, 0, 1709, 1715, 5, 182, 0, 0, 1710, 1711, 5, 157, 0, 0, - 1711, 1715, 5, 127, 0, 0, 1712, 1715, 5, 78, 0, 0, 1713, 1715, 5, 181, - 0, 0, 1714, 1708, 1, 0, 0, 0, 1714, 1710, 1, 0, 0, 0, 1714, 1712, 1, 0, - 0, 0, 1714, 1713, 1, 0, 0, 0, 1715, 1717, 1, 0, 0, 0, 1716, 1707, 1, 0, - 0, 0, 1716, 1717, 1, 0, 0, 0, 1717, 125, 1, 0, 0, 0, 1718, 1725, 7, 17, - 0, 0, 1719, 1726, 3, 148, 74, 0, 1720, 1721, 5, 39, 0, 0, 1721, 1722, 3, - 144, 72, 0, 1722, 1723, 5, 32, 0, 0, 1723, 1724, 3, 146, 73, 0, 1724, 1726, - 1, 0, 0, 0, 1725, 1719, 1, 0, 0, 0, 1725, 1720, 1, 0, 0, 0, 1726, 127, - 1, 0, 0, 0, 1727, 1728, 3, 218, 109, 0, 1728, 1738, 5, 3, 0, 0, 1729, 1734, - 3, 64, 32, 0, 1730, 1731, 5, 5, 0, 0, 1731, 1733, 3, 64, 32, 0, 1732, 1730, - 1, 0, 0, 0, 1733, 1736, 1, 0, 0, 0, 1734, 1732, 1, 0, 0, 0, 1734, 1735, - 1, 0, 0, 0, 1735, 1739, 1, 0, 0, 0, 1736, 1734, 1, 0, 0, 0, 1737, 1739, - 5, 7, 0, 0, 1738, 1729, 1, 0, 0, 0, 1738, 1737, 1, 0, 0, 0, 1739, 1740, - 1, 0, 0, 0, 1740, 1741, 5, 4, 0, 0, 1741, 129, 1, 0, 0, 0, 1742, 1743, - 3, 220, 110, 0, 1743, 1756, 5, 3, 0, 0, 1744, 1746, 5, 62, 0, 0, 1745, - 1744, 1, 0, 0, 0, 1745, 1746, 1, 0, 0, 0, 1746, 1747, 1, 0, 0, 0, 1747, - 1752, 3, 64, 32, 0, 1748, 1749, 5, 5, 0, 0, 1749, 1751, 3, 64, 32, 0, 1750, - 1748, 1, 0, 0, 0, 1751, 1754, 1, 0, 0, 0, 1752, 1750, 1, 0, 0, 0, 1752, - 1753, 1, 0, 0, 0, 1753, 1757, 1, 0, 0, 0, 1754, 1752, 1, 0, 0, 0, 1755, - 1757, 5, 7, 0, 0, 1756, 1745, 1, 0, 0, 0, 1756, 1755, 1, 0, 0, 0, 1756, - 1757, 1, 0, 0, 0, 1757, 1758, 1, 0, 0, 0, 1758, 1760, 5, 4, 0, 0, 1759, - 1761, 3, 118, 59, 0, 1760, 1759, 1, 0, 0, 0, 1760, 1761, 1, 0, 0, 0, 1761, - 131, 1, 0, 0, 0, 1762, 1763, 3, 150, 75, 0, 1763, 1773, 5, 3, 0, 0, 1764, - 1769, 3, 64, 32, 0, 1765, 1766, 5, 5, 0, 0, 1766, 1768, 3, 64, 32, 0, 1767, - 1765, 1, 0, 0, 0, 1768, 1771, 1, 0, 0, 0, 1769, 1767, 1, 0, 0, 0, 1769, - 1770, 1, 0, 0, 0, 1770, 1774, 1, 0, 0, 0, 1771, 1769, 1, 0, 0, 0, 1772, - 1774, 5, 7, 0, 0, 1773, 1764, 1, 0, 0, 0, 1773, 1772, 1, 0, 0, 0, 1773, - 1774, 1, 0, 0, 0, 1774, 1775, 1, 0, 0, 0, 1775, 1777, 5, 4, 0, 0, 1776, - 1778, 3, 118, 59, 0, 1777, 1776, 1, 0, 0, 0, 1777, 1778, 1, 0, 0, 0, 1778, - 1779, 1, 0, 0, 0, 1779, 1782, 5, 152, 0, 0, 1780, 1783, 3, 120, 60, 0, - 1781, 1783, 3, 210, 105, 0, 1782, 1780, 1, 0, 0, 0, 1782, 1781, 1, 0, 0, - 0, 1783, 133, 1, 0, 0, 0, 1784, 1786, 5, 149, 0, 0, 1785, 1787, 5, 116, - 0, 0, 1786, 1785, 1, 0, 0, 0, 1786, 1787, 1, 0, 0, 0, 1787, 1788, 1, 0, - 0, 0, 1788, 1793, 3, 54, 27, 0, 1789, 1790, 5, 5, 0, 0, 1790, 1792, 3, - 54, 
27, 0, 1791, 1789, 1, 0, 0, 0, 1792, 1795, 1, 0, 0, 0, 1793, 1791, - 1, 0, 0, 0, 1793, 1794, 1, 0, 0, 0, 1794, 135, 1, 0, 0, 0, 1795, 1793, - 1, 0, 0, 0, 1796, 1797, 5, 109, 0, 0, 1797, 1798, 5, 40, 0, 0, 1798, 1803, - 3, 140, 70, 0, 1799, 1800, 5, 5, 0, 0, 1800, 1802, 3, 140, 70, 0, 1801, - 1799, 1, 0, 0, 0, 1802, 1805, 1, 0, 0, 0, 1803, 1801, 1, 0, 0, 0, 1803, - 1804, 1, 0, 0, 0, 1804, 137, 1, 0, 0, 0, 1805, 1803, 1, 0, 0, 0, 1806, - 1807, 5, 98, 0, 0, 1807, 1810, 3, 64, 32, 0, 1808, 1809, 7, 18, 0, 0, 1809, - 1811, 3, 64, 32, 0, 1810, 1808, 1, 0, 0, 0, 1810, 1811, 1, 0, 0, 0, 1811, - 139, 1, 0, 0, 0, 1812, 1815, 3, 64, 32, 0, 1813, 1814, 5, 45, 0, 0, 1814, - 1816, 3, 190, 95, 0, 1815, 1813, 1, 0, 0, 0, 1815, 1816, 1, 0, 0, 0, 1816, - 1818, 1, 0, 0, 0, 1817, 1819, 3, 142, 71, 0, 1818, 1817, 1, 0, 0, 0, 1818, - 1819, 1, 0, 0, 0, 1819, 1822, 1, 0, 0, 0, 1820, 1821, 5, 175, 0, 0, 1821, - 1823, 7, 19, 0, 0, 1822, 1820, 1, 0, 0, 0, 1822, 1823, 1, 0, 0, 0, 1823, - 141, 1, 0, 0, 0, 1824, 1825, 7, 20, 0, 0, 1825, 143, 1, 0, 0, 0, 1826, - 1827, 3, 64, 32, 0, 1827, 1828, 5, 155, 0, 0, 1828, 1837, 1, 0, 0, 0, 1829, - 1830, 3, 64, 32, 0, 1830, 1831, 5, 158, 0, 0, 1831, 1837, 1, 0, 0, 0, 1832, - 1833, 5, 157, 0, 0, 1833, 1837, 5, 127, 0, 0, 1834, 1835, 5, 156, 0, 0, - 1835, 1837, 5, 155, 0, 0, 1836, 1826, 1, 0, 0, 0, 1836, 1829, 1, 0, 0, - 0, 1836, 1832, 1, 0, 0, 0, 1836, 1834, 1, 0, 0, 0, 1837, 145, 1, 0, 0, - 0, 1838, 1839, 3, 64, 32, 0, 1839, 1840, 5, 155, 0, 0, 1840, 1849, 1, 0, - 0, 0, 1841, 1842, 3, 64, 32, 0, 1842, 1843, 5, 158, 0, 0, 1843, 1849, 1, - 0, 0, 0, 1844, 1845, 5, 157, 0, 0, 1845, 1849, 5, 127, 0, 0, 1846, 1847, - 5, 156, 0, 0, 1847, 1849, 5, 158, 0, 0, 1848, 1838, 1, 0, 0, 0, 1848, 1841, - 1, 0, 0, 0, 1848, 1844, 1, 0, 0, 0, 1848, 1846, 1, 0, 0, 0, 1849, 147, - 1, 0, 0, 0, 1850, 1851, 3, 64, 32, 0, 1851, 1852, 5, 155, 0, 0, 1852, 1858, - 1, 0, 0, 0, 1853, 1854, 5, 156, 0, 0, 1854, 1858, 5, 155, 0, 0, 1855, 1856, - 5, 157, 0, 0, 1856, 1858, 5, 127, 0, 0, 1857, 1850, 1, 0, 0, 0, 1857, 1853, - 1, 0, 0, 0, 1857, 1855, 1, 0, 0, 0, 1858, 149, 1, 0, 0, 0, 1859, 1860, - 7, 21, 0, 0, 1860, 1861, 5, 3, 0, 0, 1861, 1862, 3, 64, 32, 0, 1862, 1863, - 5, 4, 0, 0, 1863, 1864, 5, 152, 0, 0, 1864, 1866, 5, 3, 0, 0, 1865, 1867, - 3, 156, 78, 0, 1866, 1865, 1, 0, 0, 0, 1866, 1867, 1, 0, 0, 0, 1867, 1868, - 1, 0, 0, 0, 1868, 1870, 3, 160, 80, 0, 1869, 1871, 3, 126, 63, 0, 1870, - 1869, 1, 0, 0, 0, 1870, 1871, 1, 0, 0, 0, 1871, 1872, 1, 0, 0, 0, 1872, - 1873, 5, 4, 0, 0, 1873, 1945, 1, 0, 0, 0, 1874, 1875, 7, 22, 0, 0, 1875, - 1876, 5, 3, 0, 0, 1876, 1877, 5, 4, 0, 0, 1877, 1878, 5, 152, 0, 0, 1878, - 1880, 5, 3, 0, 0, 1879, 1881, 3, 156, 78, 0, 1880, 1879, 1, 0, 0, 0, 1880, - 1881, 1, 0, 0, 0, 1881, 1883, 1, 0, 0, 0, 1882, 1884, 3, 158, 79, 0, 1883, - 1882, 1, 0, 0, 0, 1883, 1884, 1, 0, 0, 0, 1884, 1885, 1, 0, 0, 0, 1885, - 1945, 5, 4, 0, 0, 1886, 1887, 7, 23, 0, 0, 1887, 1888, 5, 3, 0, 0, 1888, - 1889, 5, 4, 0, 0, 1889, 1890, 5, 152, 0, 0, 1890, 1892, 5, 3, 0, 0, 1891, - 1893, 3, 156, 78, 0, 1892, 1891, 1, 0, 0, 0, 1892, 1893, 1, 0, 0, 0, 1893, - 1894, 1, 0, 0, 0, 1894, 1895, 3, 160, 80, 0, 1895, 1896, 5, 4, 0, 0, 1896, - 1945, 1, 0, 0, 0, 1897, 1898, 7, 24, 0, 0, 1898, 1899, 5, 3, 0, 0, 1899, - 1901, 3, 64, 32, 0, 1900, 1902, 3, 152, 76, 0, 1901, 1900, 1, 0, 0, 0, - 1901, 1902, 1, 0, 0, 0, 1902, 1904, 1, 0, 0, 0, 1903, 1905, 3, 154, 77, - 0, 1904, 1903, 1, 0, 0, 0, 1904, 1905, 1, 0, 0, 0, 1905, 1906, 1, 0, 0, - 0, 1906, 1907, 5, 4, 0, 0, 1907, 1908, 5, 152, 0, 0, 1908, 1910, 5, 3, - 0, 0, 1909, 
1911, 3, 156, 78, 0, 1910, 1909, 1, 0, 0, 0, 1910, 1911, 1, - 0, 0, 0, 1911, 1912, 1, 0, 0, 0, 1912, 1913, 3, 160, 80, 0, 1913, 1914, - 5, 4, 0, 0, 1914, 1945, 1, 0, 0, 0, 1915, 1916, 5, 164, 0, 0, 1916, 1917, - 5, 3, 0, 0, 1917, 1918, 3, 64, 32, 0, 1918, 1919, 5, 5, 0, 0, 1919, 1920, - 3, 34, 17, 0, 1920, 1921, 5, 4, 0, 0, 1921, 1922, 5, 152, 0, 0, 1922, 1924, - 5, 3, 0, 0, 1923, 1925, 3, 156, 78, 0, 1924, 1923, 1, 0, 0, 0, 1924, 1925, - 1, 0, 0, 0, 1925, 1926, 1, 0, 0, 0, 1926, 1928, 3, 160, 80, 0, 1927, 1929, - 3, 126, 63, 0, 1928, 1927, 1, 0, 0, 0, 1928, 1929, 1, 0, 0, 0, 1929, 1930, - 1, 0, 0, 0, 1930, 1931, 5, 4, 0, 0, 1931, 1945, 1, 0, 0, 0, 1932, 1933, - 5, 165, 0, 0, 1933, 1934, 5, 3, 0, 0, 1934, 1935, 3, 64, 32, 0, 1935, 1936, - 5, 4, 0, 0, 1936, 1937, 5, 152, 0, 0, 1937, 1939, 5, 3, 0, 0, 1938, 1940, - 3, 156, 78, 0, 1939, 1938, 1, 0, 0, 0, 1939, 1940, 1, 0, 0, 0, 1940, 1941, - 1, 0, 0, 0, 1941, 1942, 3, 160, 80, 0, 1942, 1943, 5, 4, 0, 0, 1943, 1945, - 1, 0, 0, 0, 1944, 1859, 1, 0, 0, 0, 1944, 1874, 1, 0, 0, 0, 1944, 1886, - 1, 0, 0, 0, 1944, 1897, 1, 0, 0, 0, 1944, 1915, 1, 0, 0, 0, 1944, 1932, - 1, 0, 0, 0, 1945, 151, 1, 0, 0, 0, 1946, 1947, 5, 5, 0, 0, 1947, 1948, - 3, 34, 17, 0, 1948, 153, 1, 0, 0, 0, 1949, 1950, 5, 5, 0, 0, 1950, 1951, - 3, 34, 17, 0, 1951, 155, 1, 0, 0, 0, 1952, 1953, 5, 153, 0, 0, 1953, 1955, - 5, 40, 0, 0, 1954, 1956, 3, 64, 32, 0, 1955, 1954, 1, 0, 0, 0, 1956, 1957, - 1, 0, 0, 0, 1957, 1955, 1, 0, 0, 0, 1957, 1958, 1, 0, 0, 0, 1958, 157, - 1, 0, 0, 0, 1959, 1960, 5, 109, 0, 0, 1960, 1962, 5, 40, 0, 0, 1961, 1963, - 3, 64, 32, 0, 1962, 1961, 1, 0, 0, 0, 1963, 1964, 1, 0, 0, 0, 1964, 1962, - 1, 0, 0, 0, 1964, 1965, 1, 0, 0, 0, 1965, 159, 1, 0, 0, 0, 1966, 1967, - 5, 109, 0, 0, 1967, 1968, 5, 40, 0, 0, 1968, 1969, 3, 162, 81, 0, 1969, - 161, 1, 0, 0, 0, 1970, 1972, 3, 64, 32, 0, 1971, 1973, 3, 142, 71, 0, 1972, - 1971, 1, 0, 0, 0, 1972, 1973, 1, 0, 0, 0, 1973, 1981, 1, 0, 0, 0, 1974, - 1975, 5, 5, 0, 0, 1975, 1977, 3, 64, 32, 0, 1976, 1978, 3, 142, 71, 0, - 1977, 1976, 1, 0, 0, 0, 1977, 1978, 1, 0, 0, 0, 1978, 1980, 1, 0, 0, 0, - 1979, 1974, 1, 0, 0, 0, 1980, 1983, 1, 0, 0, 0, 1981, 1979, 1, 0, 0, 0, - 1981, 1982, 1, 0, 0, 0, 1982, 163, 1, 0, 0, 0, 1983, 1981, 1, 0, 0, 0, - 1984, 1985, 3, 86, 43, 0, 1985, 165, 1, 0, 0, 0, 1986, 1987, 3, 86, 43, - 0, 1987, 167, 1, 0, 0, 0, 1988, 1989, 7, 25, 0, 0, 1989, 169, 1, 0, 0, - 0, 1990, 1991, 5, 188, 0, 0, 1991, 171, 1, 0, 0, 0, 1992, 1995, 3, 64, - 32, 0, 1993, 1995, 3, 28, 14, 0, 1994, 1992, 1, 0, 0, 0, 1994, 1993, 1, - 0, 0, 0, 1995, 173, 1, 0, 0, 0, 1996, 1997, 7, 26, 0, 0, 1997, 175, 1, - 0, 0, 0, 1998, 1999, 7, 27, 0, 0, 1999, 177, 1, 0, 0, 0, 2000, 2001, 3, - 224, 112, 0, 2001, 179, 1, 0, 0, 0, 2002, 2003, 3, 224, 112, 0, 2003, 181, - 1, 0, 0, 0, 2004, 2005, 3, 224, 112, 0, 2005, 183, 1, 0, 0, 0, 2006, 2007, - 3, 224, 112, 0, 2007, 185, 1, 0, 0, 0, 2008, 2009, 3, 224, 112, 0, 2009, - 187, 1, 0, 0, 0, 2010, 2011, 3, 224, 112, 0, 2011, 189, 1, 0, 0, 0, 2012, - 2013, 3, 224, 112, 0, 2013, 191, 1, 0, 0, 0, 2014, 2015, 3, 224, 112, 0, - 2015, 193, 1, 0, 0, 0, 2016, 2017, 3, 224, 112, 0, 2017, 195, 1, 0, 0, - 0, 2018, 2019, 3, 224, 112, 0, 2019, 197, 1, 0, 0, 0, 2020, 2021, 3, 224, - 112, 0, 2021, 199, 1, 0, 0, 0, 2022, 2023, 3, 224, 112, 0, 2023, 201, 1, - 0, 0, 0, 2024, 2025, 3, 224, 112, 0, 2025, 203, 1, 0, 0, 0, 2026, 2027, - 3, 224, 112, 0, 2027, 205, 1, 0, 0, 0, 2028, 2029, 3, 224, 112, 0, 2029, - 207, 1, 0, 0, 0, 2030, 2031, 3, 224, 112, 0, 2031, 209, 1, 0, 0, 0, 2032, - 2033, 3, 224, 112, 0, 2033, 211, 1, 
0, 0, 0, 2034, 2035, 3, 224, 112, 0, - 2035, 213, 1, 0, 0, 0, 2036, 2037, 3, 224, 112, 0, 2037, 215, 1, 0, 0, - 0, 2038, 2039, 3, 224, 112, 0, 2039, 217, 1, 0, 0, 0, 2040, 2041, 3, 224, - 112, 0, 2041, 219, 1, 0, 0, 0, 2042, 2043, 3, 224, 112, 0, 2043, 221, 1, - 0, 0, 0, 2044, 2045, 3, 224, 112, 0, 2045, 223, 1, 0, 0, 0, 2046, 2054, - 5, 185, 0, 0, 2047, 2054, 3, 176, 88, 0, 2048, 2054, 5, 188, 0, 0, 2049, - 2050, 5, 3, 0, 0, 2050, 2051, 3, 224, 112, 0, 2051, 2052, 5, 4, 0, 0, 2052, - 2054, 1, 0, 0, 0, 2053, 2046, 1, 0, 0, 0, 2053, 2047, 1, 0, 0, 0, 2053, - 2048, 1, 0, 0, 0, 2053, 2049, 1, 0, 0, 0, 2054, 225, 1, 0, 0, 0, 296, 229, - 237, 244, 249, 255, 261, 263, 289, 296, 303, 309, 313, 318, 321, 328, 331, - 335, 343, 347, 349, 353, 357, 361, 364, 371, 377, 383, 388, 399, 405, 409, - 413, 416, 420, 426, 431, 440, 447, 453, 457, 461, 466, 472, 484, 488, 493, - 496, 499, 502, 506, 509, 523, 530, 537, 539, 542, 548, 553, 561, 566, 581, - 587, 597, 602, 612, 616, 618, 622, 627, 629, 637, 643, 648, 655, 666, 669, - 671, 678, 682, 689, 695, 701, 707, 712, 721, 726, 737, 742, 753, 758, 762, - 778, 788, 793, 801, 813, 818, 826, 833, 836, 839, 846, 849, 852, 855, 859, - 867, 872, 882, 887, 896, 903, 907, 911, 914, 922, 935, 938, 946, 955, 959, - 964, 994, 1006, 1011, 1023, 1029, 1036, 1040, 1050, 1053, 1059, 1065, 1074, - 1077, 1081, 1083, 1085, 1094, 1106, 1117, 1121, 1128, 1134, 1139, 1147, - 1152, 1156, 1159, 1163, 1166, 1174, 1185, 1191, 1193, 1201, 1208, 1215, - 1220, 1222, 1228, 1237, 1242, 1249, 1253, 1255, 1258, 1266, 1270, 1273, - 1279, 1283, 1288, 1295, 1304, 1308, 1310, 1314, 1323, 1328, 1330, 1343, - 1346, 1349, 1354, 1358, 1361, 1364, 1369, 1373, 1378, 1381, 1384, 1389, - 1393, 1396, 1403, 1408, 1417, 1422, 1425, 1433, 1437, 1445, 1448, 1450, - 1459, 1462, 1464, 1468, 1472, 1476, 1479, 1490, 1495, 1499, 1503, 1506, - 1511, 1517, 1524, 1531, 1540, 1544, 1546, 1550, 1553, 1561, 1567, 1572, - 1578, 1585, 1592, 1597, 1600, 1603, 1606, 1611, 1616, 1623, 1627, 1631, - 1641, 1650, 1653, 1662, 1666, 1674, 1683, 1686, 1695, 1698, 1701, 1704, - 1714, 1716, 1725, 1734, 1738, 1745, 1752, 1756, 1760, 1769, 1773, 1777, - 1782, 1786, 1793, 1803, 1810, 1815, 1818, 1822, 1836, 1848, 1857, 1866, - 1870, 1880, 1883, 1892, 1901, 1904, 1910, 1924, 1928, 1939, 1944, 1957, - 1964, 1972, 1977, 1981, 1994, 2053, - } - deserializer := antlr.NewATNDeserializer(nil) - staticData.atn = deserializer.Deserialize(staticData.serializedATN) - atn := staticData.atn - staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) - decisionToDFA := staticData.decisionToDFA - for index, state := range atn.DecisionToState { - decisionToDFA[index] = antlr.NewDFA(state, index) - } -} - -// SQLiteParserInit initializes any static state used to implement SQLiteParser. By default the -// static state used to implement the parser is lazily initialized during the first call to -// NewSQLiteParser(). You can call this function if you wish to initialize the static state ahead -// of time. -func SQLiteParserInit() { - staticData := &sqliteparserParserStaticData - staticData.once.Do(sqliteparserParserInit) -} - -// NewSQLiteParser produces a new parser instance for the optional input antlr.TokenStream. 
-func NewSQLiteParser(input antlr.TokenStream) *SQLiteParser { - SQLiteParserInit() - this := new(SQLiteParser) - this.BaseParser = antlr.NewBaseParser(input) - staticData := &sqliteparserParserStaticData - this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) - this.RuleNames = staticData.ruleNames - this.LiteralNames = staticData.literalNames - this.SymbolicNames = staticData.symbolicNames - this.GrammarFileName = "SQLiteParser.g4" - - return this -} - -// SQLiteParser tokens. -const ( - SQLiteParserEOF = antlr.TokenEOF - SQLiteParserSCOL = 1 - SQLiteParserDOT = 2 - SQLiteParserOPEN_PAR = 3 - SQLiteParserCLOSE_PAR = 4 - SQLiteParserCOMMA = 5 - SQLiteParserASSIGN = 6 - SQLiteParserSTAR = 7 - SQLiteParserPLUS = 8 - SQLiteParserMINUS = 9 - SQLiteParserTILDE = 10 - SQLiteParserPIPE2 = 11 - SQLiteParserDIV = 12 - SQLiteParserMOD = 13 - SQLiteParserLT2 = 14 - SQLiteParserGT2 = 15 - SQLiteParserAMP = 16 - SQLiteParserPIPE = 17 - SQLiteParserLT = 18 - SQLiteParserLT_EQ = 19 - SQLiteParserGT = 20 - SQLiteParserGT_EQ = 21 - SQLiteParserEQ = 22 - SQLiteParserNOT_EQ1 = 23 - SQLiteParserNOT_EQ2 = 24 - SQLiteParserABORT_ = 25 - SQLiteParserACTION_ = 26 - SQLiteParserADD_ = 27 - SQLiteParserAFTER_ = 28 - SQLiteParserALL_ = 29 - SQLiteParserALTER_ = 30 - SQLiteParserANALYZE_ = 31 - SQLiteParserAND_ = 32 - SQLiteParserAS_ = 33 - SQLiteParserASC_ = 34 - SQLiteParserATTACH_ = 35 - SQLiteParserAUTOINCREMENT_ = 36 - SQLiteParserBEFORE_ = 37 - SQLiteParserBEGIN_ = 38 - SQLiteParserBETWEEN_ = 39 - SQLiteParserBY_ = 40 - SQLiteParserCASCADE_ = 41 - SQLiteParserCASE_ = 42 - SQLiteParserCAST_ = 43 - SQLiteParserCHECK_ = 44 - SQLiteParserCOLLATE_ = 45 - SQLiteParserCOLUMN_ = 46 - SQLiteParserCOMMIT_ = 47 - SQLiteParserCONFLICT_ = 48 - SQLiteParserCONSTRAINT_ = 49 - SQLiteParserCREATE_ = 50 - SQLiteParserCROSS_ = 51 - SQLiteParserCURRENT_DATE_ = 52 - SQLiteParserCURRENT_TIME_ = 53 - SQLiteParserCURRENT_TIMESTAMP_ = 54 - SQLiteParserDATABASE_ = 55 - SQLiteParserDEFAULT_ = 56 - SQLiteParserDEFERRABLE_ = 57 - SQLiteParserDEFERRED_ = 58 - SQLiteParserDELETE_ = 59 - SQLiteParserDESC_ = 60 - SQLiteParserDETACH_ = 61 - SQLiteParserDISTINCT_ = 62 - SQLiteParserDROP_ = 63 - SQLiteParserEACH_ = 64 - SQLiteParserELSE_ = 65 - SQLiteParserEND_ = 66 - SQLiteParserESCAPE_ = 67 - SQLiteParserEXCEPT_ = 68 - SQLiteParserEXCLUSIVE_ = 69 - SQLiteParserEXISTS_ = 70 - SQLiteParserEXPLAIN_ = 71 - SQLiteParserFAIL_ = 72 - SQLiteParserFOR_ = 73 - SQLiteParserFOREIGN_ = 74 - SQLiteParserFROM_ = 75 - SQLiteParserFULL_ = 76 - SQLiteParserGLOB_ = 77 - SQLiteParserGROUP_ = 78 - SQLiteParserHAVING_ = 79 - SQLiteParserIF_ = 80 - SQLiteParserIGNORE_ = 81 - SQLiteParserIMMEDIATE_ = 82 - SQLiteParserIN_ = 83 - SQLiteParserINDEX_ = 84 - SQLiteParserINDEXED_ = 85 - SQLiteParserINITIALLY_ = 86 - SQLiteParserINNER_ = 87 - SQLiteParserINSERT_ = 88 - SQLiteParserINSTEAD_ = 89 - SQLiteParserINTERSECT_ = 90 - SQLiteParserINTO_ = 91 - SQLiteParserIS_ = 92 - SQLiteParserISNULL_ = 93 - SQLiteParserJOIN_ = 94 - SQLiteParserKEY_ = 95 - SQLiteParserLEFT_ = 96 - SQLiteParserLIKE_ = 97 - SQLiteParserLIMIT_ = 98 - SQLiteParserMATCH_ = 99 - SQLiteParserNATURAL_ = 100 - SQLiteParserNO_ = 101 - SQLiteParserNOT_ = 102 - SQLiteParserNOTNULL_ = 103 - SQLiteParserNULL_ = 104 - SQLiteParserOF_ = 105 - SQLiteParserOFFSET_ = 106 - SQLiteParserON_ = 107 - SQLiteParserOR_ = 108 - SQLiteParserORDER_ = 109 - SQLiteParserOUTER_ = 110 - SQLiteParserPLAN_ = 111 - SQLiteParserPRAGMA_ = 112 - 
SQLiteParserPRIMARY_ = 113 - SQLiteParserQUERY_ = 114 - SQLiteParserRAISE_ = 115 - SQLiteParserRECURSIVE_ = 116 - SQLiteParserREFERENCES_ = 117 - SQLiteParserREGEXP_ = 118 - SQLiteParserREINDEX_ = 119 - SQLiteParserRELEASE_ = 120 - SQLiteParserRENAME_ = 121 - SQLiteParserREPLACE_ = 122 - SQLiteParserRESTRICT_ = 123 - SQLiteParserRETURNING_ = 124 - SQLiteParserRIGHT_ = 125 - SQLiteParserROLLBACK_ = 126 - SQLiteParserROW_ = 127 - SQLiteParserROWS_ = 128 - SQLiteParserSAVEPOINT_ = 129 - SQLiteParserSELECT_ = 130 - SQLiteParserSET_ = 131 - SQLiteParserTABLE_ = 132 - SQLiteParserTEMP_ = 133 - SQLiteParserTEMPORARY_ = 134 - SQLiteParserTHEN_ = 135 - SQLiteParserTO_ = 136 - SQLiteParserTRANSACTION_ = 137 - SQLiteParserTRIGGER_ = 138 - SQLiteParserUNION_ = 139 - SQLiteParserUNIQUE_ = 140 - SQLiteParserUPDATE_ = 141 - SQLiteParserUSING_ = 142 - SQLiteParserVACUUM_ = 143 - SQLiteParserVALUES_ = 144 - SQLiteParserVIEW_ = 145 - SQLiteParserVIRTUAL_ = 146 - SQLiteParserWHEN_ = 147 - SQLiteParserWHERE_ = 148 - SQLiteParserWITH_ = 149 - SQLiteParserWITHOUT_ = 150 - SQLiteParserFIRST_VALUE_ = 151 - SQLiteParserOVER_ = 152 - SQLiteParserPARTITION_ = 153 - SQLiteParserRANGE_ = 154 - SQLiteParserPRECEDING_ = 155 - SQLiteParserUNBOUNDED_ = 156 - SQLiteParserCURRENT_ = 157 - SQLiteParserFOLLOWING_ = 158 - SQLiteParserCUME_DIST_ = 159 - SQLiteParserDENSE_RANK_ = 160 - SQLiteParserLAG_ = 161 - SQLiteParserLAST_VALUE_ = 162 - SQLiteParserLEAD_ = 163 - SQLiteParserNTH_VALUE_ = 164 - SQLiteParserNTILE_ = 165 - SQLiteParserPERCENT_RANK_ = 166 - SQLiteParserRANK_ = 167 - SQLiteParserROW_NUMBER_ = 168 - SQLiteParserGENERATED_ = 169 - SQLiteParserALWAYS_ = 170 - SQLiteParserSTORED_ = 171 - SQLiteParserTRUE_ = 172 - SQLiteParserFALSE_ = 173 - SQLiteParserWINDOW_ = 174 - SQLiteParserNULLS_ = 175 - SQLiteParserFIRST_ = 176 - SQLiteParserLAST_ = 177 - SQLiteParserFILTER_ = 178 - SQLiteParserGROUPS_ = 179 - SQLiteParserEXCLUDE_ = 180 - SQLiteParserTIES_ = 181 - SQLiteParserOTHERS_ = 182 - SQLiteParserDO_ = 183 - SQLiteParserNOTHING_ = 184 - SQLiteParserIDENTIFIER = 185 - SQLiteParserNUMERIC_LITERAL = 186 - SQLiteParserBIND_PARAMETER = 187 - SQLiteParserSTRING_LITERAL = 188 - SQLiteParserBLOB_LITERAL = 189 - SQLiteParserSINGLE_LINE_COMMENT = 190 - SQLiteParserMULTILINE_COMMENT = 191 - SQLiteParserSPACES = 192 - SQLiteParserUNEXPECTED_CHAR = 193 -) - -// SQLiteParser rules. 
-const ( - SQLiteParserRULE_parse = 0 - SQLiteParserRULE_sql_stmt_list = 1 - SQLiteParserRULE_sql_stmt = 2 - SQLiteParserRULE_alter_table_stmt = 3 - SQLiteParserRULE_analyze_stmt = 4 - SQLiteParserRULE_attach_stmt = 5 - SQLiteParserRULE_begin_stmt = 6 - SQLiteParserRULE_commit_stmt = 7 - SQLiteParserRULE_rollback_stmt = 8 - SQLiteParserRULE_savepoint_stmt = 9 - SQLiteParserRULE_release_stmt = 10 - SQLiteParserRULE_create_index_stmt = 11 - SQLiteParserRULE_indexed_column = 12 - SQLiteParserRULE_create_table_stmt = 13 - SQLiteParserRULE_column_def = 14 - SQLiteParserRULE_type_name = 15 - SQLiteParserRULE_column_constraint = 16 - SQLiteParserRULE_signed_number = 17 - SQLiteParserRULE_table_constraint = 18 - SQLiteParserRULE_foreign_key_clause = 19 - SQLiteParserRULE_conflict_clause = 20 - SQLiteParserRULE_create_trigger_stmt = 21 - SQLiteParserRULE_create_view_stmt = 22 - SQLiteParserRULE_create_virtual_table_stmt = 23 - SQLiteParserRULE_with_clause = 24 - SQLiteParserRULE_cte_table_name = 25 - SQLiteParserRULE_recursive_cte = 26 - SQLiteParserRULE_common_table_expression = 27 - SQLiteParserRULE_delete_stmt = 28 - SQLiteParserRULE_delete_stmt_limited = 29 - SQLiteParserRULE_detach_stmt = 30 - SQLiteParserRULE_drop_stmt = 31 - SQLiteParserRULE_expr = 32 - SQLiteParserRULE_raise_function = 33 - SQLiteParserRULE_literal_value = 34 - SQLiteParserRULE_value_row = 35 - SQLiteParserRULE_values_clause = 36 - SQLiteParserRULE_insert_stmt = 37 - SQLiteParserRULE_returning_clause = 38 - SQLiteParserRULE_upsert_clause = 39 - SQLiteParserRULE_pragma_stmt = 40 - SQLiteParserRULE_pragma_value = 41 - SQLiteParserRULE_reindex_stmt = 42 - SQLiteParserRULE_select_stmt = 43 - SQLiteParserRULE_join_clause = 44 - SQLiteParserRULE_select_core = 45 - SQLiteParserRULE_factored_select_stmt = 46 - SQLiteParserRULE_simple_select_stmt = 47 - SQLiteParserRULE_compound_select_stmt = 48 - SQLiteParserRULE_table_or_subquery = 49 - SQLiteParserRULE_result_column = 50 - SQLiteParserRULE_join_operator = 51 - SQLiteParserRULE_join_constraint = 52 - SQLiteParserRULE_compound_operator = 53 - SQLiteParserRULE_update_stmt = 54 - SQLiteParserRULE_column_name_list = 55 - SQLiteParserRULE_update_stmt_limited = 56 - SQLiteParserRULE_qualified_table_name = 57 - SQLiteParserRULE_vacuum_stmt = 58 - SQLiteParserRULE_filter_clause = 59 - SQLiteParserRULE_window_defn = 60 - SQLiteParserRULE_over_clause = 61 - SQLiteParserRULE_frame_spec = 62 - SQLiteParserRULE_frame_clause = 63 - SQLiteParserRULE_simple_function_invocation = 64 - SQLiteParserRULE_aggregate_function_invocation = 65 - SQLiteParserRULE_window_function_invocation = 66 - SQLiteParserRULE_common_table_stmt = 67 - SQLiteParserRULE_order_by_stmt = 68 - SQLiteParserRULE_limit_stmt = 69 - SQLiteParserRULE_ordering_term = 70 - SQLiteParserRULE_asc_desc = 71 - SQLiteParserRULE_frame_left = 72 - SQLiteParserRULE_frame_right = 73 - SQLiteParserRULE_frame_single = 74 - SQLiteParserRULE_window_function = 75 - SQLiteParserRULE_offset = 76 - SQLiteParserRULE_default_value = 77 - SQLiteParserRULE_partition_by = 78 - SQLiteParserRULE_order_by_expr = 79 - SQLiteParserRULE_order_by_expr_asc_desc = 80 - SQLiteParserRULE_expr_asc_desc = 81 - SQLiteParserRULE_initial_select = 82 - SQLiteParserRULE_recursive_select = 83 - SQLiteParserRULE_unary_operator = 84 - SQLiteParserRULE_error_message = 85 - SQLiteParserRULE_module_argument = 86 - SQLiteParserRULE_column_alias = 87 - SQLiteParserRULE_keyword = 88 - SQLiteParserRULE_name = 89 - SQLiteParserRULE_function_name = 90 - SQLiteParserRULE_schema_name = 
91 - SQLiteParserRULE_table_name = 92 - SQLiteParserRULE_table_or_index_name = 93 - SQLiteParserRULE_column_name = 94 - SQLiteParserRULE_collation_name = 95 - SQLiteParserRULE_foreign_table = 96 - SQLiteParserRULE_index_name = 97 - SQLiteParserRULE_trigger_name = 98 - SQLiteParserRULE_view_name = 99 - SQLiteParserRULE_module_name = 100 - SQLiteParserRULE_pragma_name = 101 - SQLiteParserRULE_savepoint_name = 102 - SQLiteParserRULE_table_alias = 103 - SQLiteParserRULE_transaction_name = 104 - SQLiteParserRULE_window_name = 105 - SQLiteParserRULE_alias = 106 - SQLiteParserRULE_filename = 107 - SQLiteParserRULE_base_window_name = 108 - SQLiteParserRULE_simple_func = 109 - SQLiteParserRULE_aggregate_func = 110 - SQLiteParserRULE_table_function_name = 111 - SQLiteParserRULE_any_name = 112 -) - -// IParseContext is an interface to support dynamic dispatch. -type IParseContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - EOF() antlr.TerminalNode - AllSql_stmt_list() []ISql_stmt_listContext - Sql_stmt_list(i int) ISql_stmt_listContext - - // IsParseContext differentiates from other interfaces. - IsParseContext() -} - -type ParseContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyParseContext() *ParseContext { - var p = new(ParseContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_parse - return p -} - -func (*ParseContext) IsParseContext() {} - -func NewParseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ParseContext { - var p = new(ParseContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_parse - - return p -} - -func (s *ParseContext) GetParser() antlr.Parser { return s.parser } - -func (s *ParseContext) EOF() antlr.TerminalNode { - return s.GetToken(SQLiteParserEOF, 0) -} - -func (s *ParseContext) AllSql_stmt_list() []ISql_stmt_listContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ISql_stmt_listContext); ok { - len++ - } - } - - tst := make([]ISql_stmt_listContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ISql_stmt_listContext); ok { - tst[i] = t.(ISql_stmt_listContext) - i++ - } - } - - return tst -} - -func (s *ParseContext) Sql_stmt_list(i int) ISql_stmt_listContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISql_stmt_listContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ISql_stmt_listContext) -} - -func (s *ParseContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *ParseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *ParseContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterParse(s) - } -} - -func (s *ParseContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitParse(s) - } -} - -func (p *SQLiteParser) Parse() (localctx IParseContext) { - this := p - _ = this - - localctx = NewParseContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 0, SQLiteParserRULE_parse) - var _la int - - defer func() { - 
p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(229) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for ((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&-6339801325483589630) != 0) || ((int64((_la-66)) & ^0x3f) == 0 && ((int64(1)<<(_la-66))&-7971300971697405919) != 0) || ((int64((_la-130)) & ^0x3f) == 0 && ((int64(1)<<(_la-130))&550913) != 0) { - { - p.SetState(226) - p.Sql_stmt_list() - } - - p.SetState(231) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(232) - p.Match(SQLiteParserEOF) - } - - return localctx -} - -// ISql_stmt_listContext is an interface to support dynamic dispatch. -type ISql_stmt_listContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - AllSql_stmt() []ISql_stmtContext - Sql_stmt(i int) ISql_stmtContext - AllSCOL() []antlr.TerminalNode - SCOL(i int) antlr.TerminalNode - - // IsSql_stmt_listContext differentiates from other interfaces. - IsSql_stmt_listContext() -} - -type Sql_stmt_listContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptySql_stmt_listContext() *Sql_stmt_listContext { - var p = new(Sql_stmt_listContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_sql_stmt_list - return p -} - -func (*Sql_stmt_listContext) IsSql_stmt_listContext() {} - -func NewSql_stmt_listContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Sql_stmt_listContext { - var p = new(Sql_stmt_listContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_sql_stmt_list - - return p -} - -func (s *Sql_stmt_listContext) GetParser() antlr.Parser { return s.parser } - -func (s *Sql_stmt_listContext) AllSql_stmt() []ISql_stmtContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ISql_stmtContext); ok { - len++ - } - } - - tst := make([]ISql_stmtContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ISql_stmtContext); ok { - tst[i] = t.(ISql_stmtContext) - i++ - } - } - - return tst -} - -func (s *Sql_stmt_listContext) Sql_stmt(i int) ISql_stmtContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISql_stmtContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ISql_stmtContext) -} - -func (s *Sql_stmt_listContext) AllSCOL() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserSCOL) -} - -func (s *Sql_stmt_listContext) SCOL(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserSCOL, i) -} - -func (s *Sql_stmt_listContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Sql_stmt_listContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Sql_stmt_listContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterSql_stmt_list(s) - } -} - -func (s *Sql_stmt_listContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok 
:= listener.(SQLiteParserListener); ok { - listenerT.ExitSql_stmt_list(s) - } -} - -func (p *SQLiteParser) Sql_stmt_list() (localctx ISql_stmt_listContext) { - this := p - _ = this - - localctx = NewSql_stmt_listContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 2, SQLiteParserRULE_sql_stmt_list) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - var _alt int - - p.EnterOuterAlt(localctx, 1) - p.SetState(237) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserSCOL { - { - p.SetState(234) - p.Match(SQLiteParserSCOL) - } - - p.SetState(239) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(240) - p.Sql_stmt() - } - p.SetState(249) - p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) - - for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { - if _alt == 1 { - p.SetState(242) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for ok := true; ok; ok = _la == SQLiteParserSCOL { - { - p.SetState(241) - p.Match(SQLiteParserSCOL) - } - - p.SetState(244) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(246) - p.Sql_stmt() - } - - } - p.SetState(251) - p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) - } - p.SetState(255) - p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 4, p.GetParserRuleContext()) - - for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { - if _alt == 1 { - { - p.SetState(252) - p.Match(SQLiteParserSCOL) - } - - } - p.SetState(257) - p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 4, p.GetParserRuleContext()) - } - - return localctx -} - -// ISql_stmtContext is an interface to support dynamic dispatch. -type ISql_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Alter_table_stmt() IAlter_table_stmtContext - Analyze_stmt() IAnalyze_stmtContext - Attach_stmt() IAttach_stmtContext - Begin_stmt() IBegin_stmtContext - Commit_stmt() ICommit_stmtContext - Create_index_stmt() ICreate_index_stmtContext - Create_table_stmt() ICreate_table_stmtContext - Create_trigger_stmt() ICreate_trigger_stmtContext - Create_view_stmt() ICreate_view_stmtContext - Create_virtual_table_stmt() ICreate_virtual_table_stmtContext - Delete_stmt() IDelete_stmtContext - Delete_stmt_limited() IDelete_stmt_limitedContext - Detach_stmt() IDetach_stmtContext - Drop_stmt() IDrop_stmtContext - Insert_stmt() IInsert_stmtContext - Pragma_stmt() IPragma_stmtContext - Reindex_stmt() IReindex_stmtContext - Release_stmt() IRelease_stmtContext - Rollback_stmt() IRollback_stmtContext - Savepoint_stmt() ISavepoint_stmtContext - Select_stmt() ISelect_stmtContext - Update_stmt() IUpdate_stmtContext - Update_stmt_limited() IUpdate_stmt_limitedContext - Vacuum_stmt() IVacuum_stmtContext - EXPLAIN_() antlr.TerminalNode - QUERY_() antlr.TerminalNode - PLAN_() antlr.TerminalNode - - // IsSql_stmtContext differentiates from other interfaces. 
- IsSql_stmtContext() -} - -type Sql_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptySql_stmtContext() *Sql_stmtContext { - var p = new(Sql_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_sql_stmt - return p -} - -func (*Sql_stmtContext) IsSql_stmtContext() {} - -func NewSql_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Sql_stmtContext { - var p = new(Sql_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_sql_stmt - - return p -} - -func (s *Sql_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Sql_stmtContext) Alter_table_stmt() IAlter_table_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAlter_table_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAlter_table_stmtContext) -} - -func (s *Sql_stmtContext) Analyze_stmt() IAnalyze_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAnalyze_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAnalyze_stmtContext) -} - -func (s *Sql_stmtContext) Attach_stmt() IAttach_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAttach_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAttach_stmtContext) -} - -func (s *Sql_stmtContext) Begin_stmt() IBegin_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IBegin_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IBegin_stmtContext) -} - -func (s *Sql_stmtContext) Commit_stmt() ICommit_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICommit_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICommit_stmtContext) -} - -func (s *Sql_stmtContext) Create_index_stmt() ICreate_index_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICreate_index_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICreate_index_stmtContext) -} - -func (s *Sql_stmtContext) Create_table_stmt() ICreate_table_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICreate_table_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICreate_table_stmtContext) -} - -func (s *Sql_stmtContext) Create_trigger_stmt() ICreate_trigger_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICreate_trigger_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICreate_trigger_stmtContext) -} - -func (s *Sql_stmtContext) Create_view_stmt() ICreate_view_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICreate_view_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICreate_view_stmtContext) -} - -func (s 
*Sql_stmtContext) Create_virtual_table_stmt() ICreate_virtual_table_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICreate_virtual_table_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICreate_virtual_table_stmtContext) -} - -func (s *Sql_stmtContext) Delete_stmt() IDelete_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IDelete_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IDelete_stmtContext) -} - -func (s *Sql_stmtContext) Delete_stmt_limited() IDelete_stmt_limitedContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IDelete_stmt_limitedContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IDelete_stmt_limitedContext) -} - -func (s *Sql_stmtContext) Detach_stmt() IDetach_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IDetach_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IDetach_stmtContext) -} - -func (s *Sql_stmtContext) Drop_stmt() IDrop_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IDrop_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IDrop_stmtContext) -} - -func (s *Sql_stmtContext) Insert_stmt() IInsert_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IInsert_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IInsert_stmtContext) -} - -func (s *Sql_stmtContext) Pragma_stmt() IPragma_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IPragma_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IPragma_stmtContext) -} - -func (s *Sql_stmtContext) Reindex_stmt() IReindex_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IReindex_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IReindex_stmtContext) -} - -func (s *Sql_stmtContext) Release_stmt() IRelease_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IRelease_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IRelease_stmtContext) -} - -func (s *Sql_stmtContext) Rollback_stmt() IRollback_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IRollback_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IRollback_stmtContext) -} - -func (s *Sql_stmtContext) Savepoint_stmt() ISavepoint_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISavepoint_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISavepoint_stmtContext) -} - -func (s *Sql_stmtContext) Select_stmt() ISelect_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISelect_stmtContext); ok { - t = 
ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISelect_stmtContext) -} - -func (s *Sql_stmtContext) Update_stmt() IUpdate_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IUpdate_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IUpdate_stmtContext) -} - -func (s *Sql_stmtContext) Update_stmt_limited() IUpdate_stmt_limitedContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IUpdate_stmt_limitedContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IUpdate_stmt_limitedContext) -} - -func (s *Sql_stmtContext) Vacuum_stmt() IVacuum_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IVacuum_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IVacuum_stmtContext) -} - -func (s *Sql_stmtContext) EXPLAIN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXPLAIN_, 0) -} - -func (s *Sql_stmtContext) QUERY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserQUERY_, 0) -} - -func (s *Sql_stmtContext) PLAN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPLAN_, 0) -} - -func (s *Sql_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Sql_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Sql_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterSql_stmt(s) - } -} - -func (s *Sql_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitSql_stmt(s) - } -} - -func (p *SQLiteParser) Sql_stmt() (localctx ISql_stmtContext) { - this := p - _ = this - - localctx = NewSql_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 4, SQLiteParserRULE_sql_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(263) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserEXPLAIN_ { - { - p.SetState(258) - p.Match(SQLiteParserEXPLAIN_) - } - p.SetState(261) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserQUERY_ { - { - p.SetState(259) - p.Match(SQLiteParserQUERY_) - } - { - p.SetState(260) - p.Match(SQLiteParserPLAN_) - } - - } - - } - p.SetState(289) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 7, p.GetParserRuleContext()) { - case 1: - { - p.SetState(265) - p.Alter_table_stmt() - } - - case 2: - { - p.SetState(266) - p.Analyze_stmt() - } - - case 3: - { - p.SetState(267) - p.Attach_stmt() - } - - case 4: - { - p.SetState(268) - p.Begin_stmt() - } - - case 5: - { - p.SetState(269) - p.Commit_stmt() - } - - case 6: - { - p.SetState(270) - p.Create_index_stmt() - } - - case 7: - { - p.SetState(271) - p.Create_table_stmt() - } - - case 8: - { - p.SetState(272) - p.Create_trigger_stmt() - } - - case 9: - { - p.SetState(273) - 
p.Create_view_stmt() - } - - case 10: - { - p.SetState(274) - p.Create_virtual_table_stmt() - } - - case 11: - { - p.SetState(275) - p.Delete_stmt() - } - - case 12: - { - p.SetState(276) - p.Delete_stmt_limited() - } - - case 13: - { - p.SetState(277) - p.Detach_stmt() - } - - case 14: - { - p.SetState(278) - p.Drop_stmt() - } - - case 15: - { - p.SetState(279) - p.Insert_stmt() - } - - case 16: - { - p.SetState(280) - p.Pragma_stmt() - } - - case 17: - { - p.SetState(281) - p.Reindex_stmt() - } - - case 18: - { - p.SetState(282) - p.Release_stmt() - } - - case 19: - { - p.SetState(283) - p.Rollback_stmt() - } - - case 20: - { - p.SetState(284) - p.Savepoint_stmt() - } - - case 21: - { - p.SetState(285) - p.Select_stmt() - } - - case 22: - { - p.SetState(286) - p.Update_stmt() - } - - case 23: - { - p.SetState(287) - p.Update_stmt_limited() - } - - case 24: - { - p.SetState(288) - p.Vacuum_stmt() - } - - } - - return localctx -} - -// IAlter_table_stmtContext is an interface to support dynamic dispatch. -type IAlter_table_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // GetNew_table_name returns the new_table_name rule contexts. - GetNew_table_name() ITable_nameContext - - // GetOld_column_name returns the old_column_name rule contexts. - GetOld_column_name() IColumn_nameContext - - // GetNew_column_name returns the new_column_name rule contexts. - GetNew_column_name() IColumn_nameContext - - // SetNew_table_name sets the new_table_name rule contexts. - SetNew_table_name(ITable_nameContext) - - // SetOld_column_name sets the old_column_name rule contexts. - SetOld_column_name(IColumn_nameContext) - - // SetNew_column_name sets the new_column_name rule contexts. - SetNew_column_name(IColumn_nameContext) - - // Getter signatures - ALTER_() antlr.TerminalNode - TABLE_() antlr.TerminalNode - AllTable_name() []ITable_nameContext - Table_name(i int) ITable_nameContext - RENAME_() antlr.TerminalNode - ADD_() antlr.TerminalNode - Column_def() IColumn_defContext - DROP_() antlr.TerminalNode - AllColumn_name() []IColumn_nameContext - Column_name(i int) IColumn_nameContext - Schema_name() ISchema_nameContext - DOT() antlr.TerminalNode - TO_() antlr.TerminalNode - COLUMN_() antlr.TerminalNode - - // IsAlter_table_stmtContext differentiates from other interfaces. 
- IsAlter_table_stmtContext() -} - -type Alter_table_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - new_table_name ITable_nameContext - old_column_name IColumn_nameContext - new_column_name IColumn_nameContext -} - -func NewEmptyAlter_table_stmtContext() *Alter_table_stmtContext { - var p = new(Alter_table_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_alter_table_stmt - return p -} - -func (*Alter_table_stmtContext) IsAlter_table_stmtContext() {} - -func NewAlter_table_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Alter_table_stmtContext { - var p = new(Alter_table_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_alter_table_stmt - - return p -} - -func (s *Alter_table_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Alter_table_stmtContext) GetNew_table_name() ITable_nameContext { return s.new_table_name } - -func (s *Alter_table_stmtContext) GetOld_column_name() IColumn_nameContext { return s.old_column_name } - -func (s *Alter_table_stmtContext) GetNew_column_name() IColumn_nameContext { return s.new_column_name } - -func (s *Alter_table_stmtContext) SetNew_table_name(v ITable_nameContext) { s.new_table_name = v } - -func (s *Alter_table_stmtContext) SetOld_column_name(v IColumn_nameContext) { s.old_column_name = v } - -func (s *Alter_table_stmtContext) SetNew_column_name(v IColumn_nameContext) { s.new_column_name = v } - -func (s *Alter_table_stmtContext) ALTER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserALTER_, 0) -} - -func (s *Alter_table_stmtContext) TABLE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTABLE_, 0) -} - -func (s *Alter_table_stmtContext) AllTable_name() []ITable_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ITable_nameContext); ok { - len++ - } - } - - tst := make([]ITable_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ITable_nameContext); ok { - tst[i] = t.(ITable_nameContext) - i++ - } - } - - return tst -} - -func (s *Alter_table_stmtContext) Table_name(i int) ITable_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *Alter_table_stmtContext) RENAME_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRENAME_, 0) -} - -func (s *Alter_table_stmtContext) ADD_() antlr.TerminalNode { - return s.GetToken(SQLiteParserADD_, 0) -} - -func (s *Alter_table_stmtContext) Column_def() IColumn_defContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_defContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IColumn_defContext) -} - -func (s *Alter_table_stmtContext) DROP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDROP_, 0) -} - -func (s *Alter_table_stmtContext) AllColumn_name() []IColumn_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_nameContext); ok { - len++ - } - } - - tst := make([]IColumn_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_nameContext); ok 
{ - tst[i] = t.(IColumn_nameContext) - i++ - } - } - - return tst -} - -func (s *Alter_table_stmtContext) Column_name(i int) IColumn_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Alter_table_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Alter_table_stmtContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Alter_table_stmtContext) TO_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTO_, 0) -} - -func (s *Alter_table_stmtContext) COLUMN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOLUMN_, 0) -} - -func (s *Alter_table_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Alter_table_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Alter_table_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterAlter_table_stmt(s) - } -} - -func (s *Alter_table_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitAlter_table_stmt(s) - } -} - -func (p *SQLiteParser) Alter_table_stmt() (localctx IAlter_table_stmtContext) { - this := p - _ = this - - localctx = NewAlter_table_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 6, SQLiteParserRULE_alter_table_stmt) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(291) - p.Match(SQLiteParserALTER_) - } - { - p.SetState(292) - p.Match(SQLiteParserTABLE_) - } - p.SetState(296) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 8, p.GetParserRuleContext()) == 1 { - { - p.SetState(293) - p.Schema_name() - } - { - p.SetState(294) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(298) - p.Table_name() - } - p.SetState(321) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserRENAME_: - { - p.SetState(299) - p.Match(SQLiteParserRENAME_) - } - p.SetState(309) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 10, p.GetParserRuleContext()) { - case 1: - { - p.SetState(300) - p.Match(SQLiteParserTO_) - } - { - p.SetState(301) - - var _x = p.Table_name() - - localctx.(*Alter_table_stmtContext).new_table_name = _x - } - - case 2: - p.SetState(303) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 9, p.GetParserRuleContext()) == 1 { - { - p.SetState(302) - p.Match(SQLiteParserCOLUMN_) - } - - } - { - p.SetState(305) - - var _x = p.Column_name() - - localctx.(*Alter_table_stmtContext).old_column_name = _x - } - { - p.SetState(306) - p.Match(SQLiteParserTO_) - } - { - p.SetState(307) - - var 
_x = p.Column_name() - - localctx.(*Alter_table_stmtContext).new_column_name = _x - } - - } - - case SQLiteParserADD_: - { - p.SetState(311) - p.Match(SQLiteParserADD_) - } - p.SetState(313) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 11, p.GetParserRuleContext()) == 1 { - { - p.SetState(312) - p.Match(SQLiteParserCOLUMN_) - } - - } - { - p.SetState(315) - p.Column_def() - } - - case SQLiteParserDROP_: - { - p.SetState(316) - p.Match(SQLiteParserDROP_) - } - p.SetState(318) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 12, p.GetParserRuleContext()) == 1 { - { - p.SetState(317) - p.Match(SQLiteParserCOLUMN_) - } - - } - { - p.SetState(320) - p.Column_name() - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - return localctx -} - -// IAnalyze_stmtContext is an interface to support dynamic dispatch. -type IAnalyze_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - ANALYZE_() antlr.TerminalNode - Schema_name() ISchema_nameContext - Table_or_index_name() ITable_or_index_nameContext - DOT() antlr.TerminalNode - - // IsAnalyze_stmtContext differentiates from other interfaces. - IsAnalyze_stmtContext() -} - -type Analyze_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyAnalyze_stmtContext() *Analyze_stmtContext { - var p = new(Analyze_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_analyze_stmt - return p -} - -func (*Analyze_stmtContext) IsAnalyze_stmtContext() {} - -func NewAnalyze_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Analyze_stmtContext { - var p = new(Analyze_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_analyze_stmt - - return p -} - -func (s *Analyze_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Analyze_stmtContext) ANALYZE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserANALYZE_, 0) -} - -func (s *Analyze_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Analyze_stmtContext) Table_or_index_name() ITable_or_index_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_or_index_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_or_index_nameContext) -} - -func (s *Analyze_stmtContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Analyze_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Analyze_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Analyze_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterAnalyze_stmt(s) - } -} - -func (s *Analyze_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - 
listenerT.ExitAnalyze_stmt(s) - } -} - -func (p *SQLiteParser) Analyze_stmt() (localctx IAnalyze_stmtContext) { - this := p - _ = this - - localctx = NewAnalyze_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 8, SQLiteParserRULE_analyze_stmt) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(323) - p.Match(SQLiteParserANALYZE_) - } - p.SetState(331) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 15, p.GetParserRuleContext()) == 1 { - { - p.SetState(324) - p.Schema_name() - } - - } else if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 15, p.GetParserRuleContext()) == 2 { - p.SetState(328) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 14, p.GetParserRuleContext()) == 1 { - { - p.SetState(325) - p.Schema_name() - } - { - p.SetState(326) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(330) - p.Table_or_index_name() - } - - } - - return localctx -} - -// IAttach_stmtContext is an interface to support dynamic dispatch. -type IAttach_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - ATTACH_() antlr.TerminalNode - Expr() IExprContext - AS_() antlr.TerminalNode - Schema_name() ISchema_nameContext - DATABASE_() antlr.TerminalNode - - // IsAttach_stmtContext differentiates from other interfaces. - IsAttach_stmtContext() -} - -type Attach_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyAttach_stmtContext() *Attach_stmtContext { - var p = new(Attach_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_attach_stmt - return p -} - -func (*Attach_stmtContext) IsAttach_stmtContext() {} - -func NewAttach_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Attach_stmtContext { - var p = new(Attach_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_attach_stmt - - return p -} - -func (s *Attach_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Attach_stmtContext) ATTACH_() antlr.TerminalNode { - return s.GetToken(SQLiteParserATTACH_, 0) -} - -func (s *Attach_stmtContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Attach_stmtContext) AS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, 0) -} - -func (s *Attach_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Attach_stmtContext) DATABASE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDATABASE_, 0) -} - -func (s *Attach_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Attach_stmtContext) ToStringTree(ruleNames 
[]string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Attach_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterAttach_stmt(s) - } -} - -func (s *Attach_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitAttach_stmt(s) - } -} - -func (p *SQLiteParser) Attach_stmt() (localctx IAttach_stmtContext) { - this := p - _ = this - - localctx = NewAttach_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 10, SQLiteParserRULE_attach_stmt) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(333) - p.Match(SQLiteParserATTACH_) - } - p.SetState(335) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 { - { - p.SetState(334) - p.Match(SQLiteParserDATABASE_) - } - - } - { - p.SetState(337) - p.expr(0) - } - { - p.SetState(338) - p.Match(SQLiteParserAS_) - } - { - p.SetState(339) - p.Schema_name() - } - - return localctx -} - -// IBegin_stmtContext is an interface to support dynamic dispatch. -type IBegin_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - BEGIN_() antlr.TerminalNode - TRANSACTION_() antlr.TerminalNode - DEFERRED_() antlr.TerminalNode - IMMEDIATE_() antlr.TerminalNode - EXCLUSIVE_() antlr.TerminalNode - Transaction_name() ITransaction_nameContext - - // IsBegin_stmtContext differentiates from other interfaces. 
- IsBegin_stmtContext() -} - -type Begin_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyBegin_stmtContext() *Begin_stmtContext { - var p = new(Begin_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_begin_stmt - return p -} - -func (*Begin_stmtContext) IsBegin_stmtContext() {} - -func NewBegin_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Begin_stmtContext { - var p = new(Begin_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_begin_stmt - - return p -} - -func (s *Begin_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Begin_stmtContext) BEGIN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserBEGIN_, 0) -} - -func (s *Begin_stmtContext) TRANSACTION_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTRANSACTION_, 0) -} - -func (s *Begin_stmtContext) DEFERRED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDEFERRED_, 0) -} - -func (s *Begin_stmtContext) IMMEDIATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIMMEDIATE_, 0) -} - -func (s *Begin_stmtContext) EXCLUSIVE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXCLUSIVE_, 0) -} - -func (s *Begin_stmtContext) Transaction_name() ITransaction_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITransaction_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITransaction_nameContext) -} - -func (s *Begin_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Begin_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Begin_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterBegin_stmt(s) - } -} - -func (s *Begin_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitBegin_stmt(s) - } -} - -func (p *SQLiteParser) Begin_stmt() (localctx IBegin_stmtContext) { - this := p - _ = this - - localctx = NewBegin_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 12, SQLiteParserRULE_begin_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(341) - p.Match(SQLiteParserBEGIN_) - } - p.SetState(343) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if (int64((_la-58)) & ^0x3f) == 0 && ((int64(1)<<(_la-58))&16779265) != 0 { - { - p.SetState(342) - _la = p.GetTokenStream().LA(1) - - if !((int64((_la-58)) & ^0x3f) == 0 && ((int64(1)<<(_la-58))&16779265) != 0) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - } - p.SetState(349) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserTRANSACTION_ { - { - p.SetState(345) - p.Match(SQLiteParserTRANSACTION_) - } - p.SetState(347) - p.GetErrorHandler().Sync(p) - - if 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 18, p.GetParserRuleContext()) == 1 { - { - p.SetState(346) - p.Transaction_name() - } - - } - - } - - return localctx -} - -// ICommit_stmtContext is an interface to support dynamic dispatch. -type ICommit_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - COMMIT_() antlr.TerminalNode - END_() antlr.TerminalNode - TRANSACTION_() antlr.TerminalNode - - // IsCommit_stmtContext differentiates from other interfaces. - IsCommit_stmtContext() -} - -type Commit_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyCommit_stmtContext() *Commit_stmtContext { - var p = new(Commit_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_commit_stmt - return p -} - -func (*Commit_stmtContext) IsCommit_stmtContext() {} - -func NewCommit_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Commit_stmtContext { - var p = new(Commit_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_commit_stmt - - return p -} - -func (s *Commit_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Commit_stmtContext) COMMIT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMIT_, 0) -} - -func (s *Commit_stmtContext) END_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEND_, 0) -} - -func (s *Commit_stmtContext) TRANSACTION_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTRANSACTION_, 0) -} - -func (s *Commit_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Commit_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Commit_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCommit_stmt(s) - } -} - -func (s *Commit_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitCommit_stmt(s) - } -} - -func (p *SQLiteParser) Commit_stmt() (localctx ICommit_stmtContext) { - this := p - _ = this - - localctx = NewCommit_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 14, SQLiteParserRULE_commit_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(351) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserCOMMIT_ || _la == SQLiteParserEND_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - p.SetState(353) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserTRANSACTION_ { - { - p.SetState(352) - p.Match(SQLiteParserTRANSACTION_) - } - - } - - return localctx -} - -// IRollback_stmtContext is an interface to support dynamic dispatch. -type IRollback_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
- GetParser() antlr.Parser - - // Getter signatures - ROLLBACK_() antlr.TerminalNode - TRANSACTION_() antlr.TerminalNode - TO_() antlr.TerminalNode - Savepoint_name() ISavepoint_nameContext - SAVEPOINT_() antlr.TerminalNode - - // IsRollback_stmtContext differentiates from other interfaces. - IsRollback_stmtContext() -} - -type Rollback_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyRollback_stmtContext() *Rollback_stmtContext { - var p = new(Rollback_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_rollback_stmt - return p -} - -func (*Rollback_stmtContext) IsRollback_stmtContext() {} - -func NewRollback_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Rollback_stmtContext { - var p = new(Rollback_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_rollback_stmt - - return p -} - -func (s *Rollback_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Rollback_stmtContext) ROLLBACK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROLLBACK_, 0) -} - -func (s *Rollback_stmtContext) TRANSACTION_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTRANSACTION_, 0) -} - -func (s *Rollback_stmtContext) TO_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTO_, 0) -} - -func (s *Rollback_stmtContext) Savepoint_name() ISavepoint_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISavepoint_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISavepoint_nameContext) -} - -func (s *Rollback_stmtContext) SAVEPOINT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserSAVEPOINT_, 0) -} - -func (s *Rollback_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Rollback_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Rollback_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterRollback_stmt(s) - } -} - -func (s *Rollback_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitRollback_stmt(s) - } -} - -func (p *SQLiteParser) Rollback_stmt() (localctx IRollback_stmtContext) { - this := p - _ = this - - localctx = NewRollback_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 16, SQLiteParserRULE_rollback_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(355) - p.Match(SQLiteParserROLLBACK_) - } - p.SetState(357) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserTRANSACTION_ { - { - p.SetState(356) - p.Match(SQLiteParserTRANSACTION_) - } - - } - p.SetState(364) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserTO_ { - { - p.SetState(359) - p.Match(SQLiteParserTO_) - } - p.SetState(361) - p.GetErrorHandler().Sync(p) - - if 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 22, p.GetParserRuleContext()) == 1 { - { - p.SetState(360) - p.Match(SQLiteParserSAVEPOINT_) - } - - } - { - p.SetState(363) - p.Savepoint_name() - } - - } - - return localctx -} - -// ISavepoint_stmtContext is an interface to support dynamic dispatch. -type ISavepoint_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - SAVEPOINT_() antlr.TerminalNode - Savepoint_name() ISavepoint_nameContext - - // IsSavepoint_stmtContext differentiates from other interfaces. - IsSavepoint_stmtContext() -} - -type Savepoint_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptySavepoint_stmtContext() *Savepoint_stmtContext { - var p = new(Savepoint_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_savepoint_stmt - return p -} - -func (*Savepoint_stmtContext) IsSavepoint_stmtContext() {} - -func NewSavepoint_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Savepoint_stmtContext { - var p = new(Savepoint_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_savepoint_stmt - - return p -} - -func (s *Savepoint_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Savepoint_stmtContext) SAVEPOINT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserSAVEPOINT_, 0) -} - -func (s *Savepoint_stmtContext) Savepoint_name() ISavepoint_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISavepoint_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISavepoint_nameContext) -} - -func (s *Savepoint_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Savepoint_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Savepoint_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterSavepoint_stmt(s) - } -} - -func (s *Savepoint_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitSavepoint_stmt(s) - } -} - -func (p *SQLiteParser) Savepoint_stmt() (localctx ISavepoint_stmtContext) { - this := p - _ = this - - localctx = NewSavepoint_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 18, SQLiteParserRULE_savepoint_stmt) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(366) - p.Match(SQLiteParserSAVEPOINT_) - } - { - p.SetState(367) - p.Savepoint_name() - } - - return localctx -} - -// IRelease_stmtContext is an interface to support dynamic dispatch. -type IRelease_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
- GetParser() antlr.Parser - - // Getter signatures - RELEASE_() antlr.TerminalNode - Savepoint_name() ISavepoint_nameContext - SAVEPOINT_() antlr.TerminalNode - - // IsRelease_stmtContext differentiates from other interfaces. - IsRelease_stmtContext() -} - -type Release_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyRelease_stmtContext() *Release_stmtContext { - var p = new(Release_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_release_stmt - return p -} - -func (*Release_stmtContext) IsRelease_stmtContext() {} - -func NewRelease_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Release_stmtContext { - var p = new(Release_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_release_stmt - - return p -} - -func (s *Release_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Release_stmtContext) RELEASE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRELEASE_, 0) -} - -func (s *Release_stmtContext) Savepoint_name() ISavepoint_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISavepoint_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISavepoint_nameContext) -} - -func (s *Release_stmtContext) SAVEPOINT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserSAVEPOINT_, 0) -} - -func (s *Release_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Release_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Release_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterRelease_stmt(s) - } -} - -func (s *Release_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitRelease_stmt(s) - } -} - -func (p *SQLiteParser) Release_stmt() (localctx IRelease_stmtContext) { - this := p - _ = this - - localctx = NewRelease_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 20, SQLiteParserRULE_release_stmt) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(369) - p.Match(SQLiteParserRELEASE_) - } - p.SetState(371) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 24, p.GetParserRuleContext()) == 1 { - { - p.SetState(370) - p.Match(SQLiteParserSAVEPOINT_) - } - - } - { - p.SetState(373) - p.Savepoint_name() - } - - return localctx -} - -// ICreate_index_stmtContext is an interface to support dynamic dispatch. -type ICreate_index_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
- GetParser() antlr.Parser - - // Getter signatures - CREATE_() antlr.TerminalNode - INDEX_() antlr.TerminalNode - Index_name() IIndex_nameContext - ON_() antlr.TerminalNode - Table_name() ITable_nameContext - OPEN_PAR() antlr.TerminalNode - AllIndexed_column() []IIndexed_columnContext - Indexed_column(i int) IIndexed_columnContext - CLOSE_PAR() antlr.TerminalNode - UNIQUE_() antlr.TerminalNode - IF_() antlr.TerminalNode - NOT_() antlr.TerminalNode - EXISTS_() antlr.TerminalNode - Schema_name() ISchema_nameContext - DOT() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - WHERE_() antlr.TerminalNode - Expr() IExprContext - - // IsCreate_index_stmtContext differentiates from other interfaces. - IsCreate_index_stmtContext() -} - -type Create_index_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyCreate_index_stmtContext() *Create_index_stmtContext { - var p = new(Create_index_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_create_index_stmt - return p -} - -func (*Create_index_stmtContext) IsCreate_index_stmtContext() {} - -func NewCreate_index_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Create_index_stmtContext { - var p = new(Create_index_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_create_index_stmt - - return p -} - -func (s *Create_index_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Create_index_stmtContext) CREATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCREATE_, 0) -} - -func (s *Create_index_stmtContext) INDEX_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINDEX_, 0) -} - -func (s *Create_index_stmtContext) Index_name() IIndex_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IIndex_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IIndex_nameContext) -} - -func (s *Create_index_stmtContext) ON_() antlr.TerminalNode { - return s.GetToken(SQLiteParserON_, 0) -} - -func (s *Create_index_stmtContext) Table_name() ITable_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *Create_index_stmtContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Create_index_stmtContext) AllIndexed_column() []IIndexed_columnContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IIndexed_columnContext); ok { - len++ - } - } - - tst := make([]IIndexed_columnContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IIndexed_columnContext); ok { - tst[i] = t.(IIndexed_columnContext) - i++ - } - } - - return tst -} - -func (s *Create_index_stmtContext) Indexed_column(i int) IIndexed_columnContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IIndexed_columnContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IIndexed_columnContext) -} - -func (s *Create_index_stmtContext) CLOSE_PAR() antlr.TerminalNode { - return 
s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Create_index_stmtContext) UNIQUE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUNIQUE_, 0) -} - -func (s *Create_index_stmtContext) IF_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIF_, 0) -} - -func (s *Create_index_stmtContext) NOT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNOT_, 0) -} - -func (s *Create_index_stmtContext) EXISTS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXISTS_, 0) -} - -func (s *Create_index_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Create_index_stmtContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Create_index_stmtContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Create_index_stmtContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Create_index_stmtContext) WHERE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWHERE_, 0) -} - -func (s *Create_index_stmtContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Create_index_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Create_index_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Create_index_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCreate_index_stmt(s) - } -} - -func (s *Create_index_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitCreate_index_stmt(s) - } -} - -func (p *SQLiteParser) Create_index_stmt() (localctx ICreate_index_stmtContext) { - this := p - _ = this - - localctx = NewCreate_index_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 22, SQLiteParserRULE_create_index_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(375) - p.Match(SQLiteParserCREATE_) - } - p.SetState(377) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserUNIQUE_ { - { - p.SetState(376) - p.Match(SQLiteParserUNIQUE_) - } - - } - { - p.SetState(379) - p.Match(SQLiteParserINDEX_) - } - p.SetState(383) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 26, p.GetParserRuleContext()) == 1 { - { - p.SetState(380) - p.Match(SQLiteParserIF_) - } - { - p.SetState(381) - p.Match(SQLiteParserNOT_) - } - { - p.SetState(382) - p.Match(SQLiteParserEXISTS_) - } - - } - p.SetState(388) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 27, p.GetParserRuleContext()) == 1 { - { - p.SetState(385) - p.Schema_name() - } - { - 
p.SetState(386) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(390) - p.Index_name() - } - { - p.SetState(391) - p.Match(SQLiteParserON_) - } - { - p.SetState(392) - p.Table_name() - } - { - p.SetState(393) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(394) - p.Indexed_column() - } - p.SetState(399) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(395) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(396) - p.Indexed_column() - } - - p.SetState(401) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(402) - p.Match(SQLiteParserCLOSE_PAR) - } - p.SetState(405) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWHERE_ { - { - p.SetState(403) - p.Match(SQLiteParserWHERE_) - } - { - p.SetState(404) - p.expr(0) - } - - } - - return localctx -} - -// IIndexed_columnContext is an interface to support dynamic dispatch. -type IIndexed_columnContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Column_name() IColumn_nameContext - Expr() IExprContext - COLLATE_() antlr.TerminalNode - Collation_name() ICollation_nameContext - Asc_desc() IAsc_descContext - - // IsIndexed_columnContext differentiates from other interfaces. - IsIndexed_columnContext() -} - -type Indexed_columnContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyIndexed_columnContext() *Indexed_columnContext { - var p = new(Indexed_columnContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_indexed_column - return p -} - -func (*Indexed_columnContext) IsIndexed_columnContext() {} - -func NewIndexed_columnContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Indexed_columnContext { - var p = new(Indexed_columnContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_indexed_column - - return p -} - -func (s *Indexed_columnContext) GetParser() antlr.Parser { return s.parser } - -func (s *Indexed_columnContext) Column_name() IColumn_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Indexed_columnContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Indexed_columnContext) COLLATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOLLATE_, 0) -} - -func (s *Indexed_columnContext) Collation_name() ICollation_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICollation_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICollation_nameContext) -} - -func (s *Indexed_columnContext) Asc_desc() IAsc_descContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAsc_descContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAsc_descContext) -} - -func (s *Indexed_columnContext) GetRuleContext() 
antlr.RuleContext { - return s -} - -func (s *Indexed_columnContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Indexed_columnContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterIndexed_column(s) - } -} - -func (s *Indexed_columnContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitIndexed_column(s) - } -} - -func (p *SQLiteParser) Indexed_column() (localctx IIndexed_columnContext) { - this := p - _ = this - - localctx = NewIndexed_columnContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 24, SQLiteParserRULE_indexed_column) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(409) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 30, p.GetParserRuleContext()) { - case 1: - { - p.SetState(407) - p.Column_name() - } - - case 2: - { - p.SetState(408) - p.expr(0) - } - - } - p.SetState(413) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserCOLLATE_ { - { - p.SetState(411) - p.Match(SQLiteParserCOLLATE_) - } - { - p.SetState(412) - p.Collation_name() - } - - } - p.SetState(416) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserASC_ || _la == SQLiteParserDESC_ { - { - p.SetState(415) - p.Asc_desc() - } - - } - - return localctx -} - -// ICreate_table_stmtContext is an interface to support dynamic dispatch. -type ICreate_table_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // GetRow_ROW_ID returns the row_ROW_ID token. - GetRow_ROW_ID() antlr.Token - - // SetRow_ROW_ID sets the row_ROW_ID token. - SetRow_ROW_ID(antlr.Token) - - // Getter signatures - CREATE_() antlr.TerminalNode - TABLE_() antlr.TerminalNode - Table_name() ITable_nameContext - OPEN_PAR() antlr.TerminalNode - AllColumn_def() []IColumn_defContext - Column_def(i int) IColumn_defContext - CLOSE_PAR() antlr.TerminalNode - AS_() antlr.TerminalNode - Select_stmt() ISelect_stmtContext - IF_() antlr.TerminalNode - NOT_() antlr.TerminalNode - EXISTS_() antlr.TerminalNode - Schema_name() ISchema_nameContext - DOT() antlr.TerminalNode - TEMP_() antlr.TerminalNode - TEMPORARY_() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - AllTable_constraint() []ITable_constraintContext - Table_constraint(i int) ITable_constraintContext - WITHOUT_() antlr.TerminalNode - IDENTIFIER() antlr.TerminalNode - - // IsCreate_table_stmtContext differentiates from other interfaces. 
- IsCreate_table_stmtContext() -} - -type Create_table_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - row_ROW_ID antlr.Token -} - -func NewEmptyCreate_table_stmtContext() *Create_table_stmtContext { - var p = new(Create_table_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_create_table_stmt - return p -} - -func (*Create_table_stmtContext) IsCreate_table_stmtContext() {} - -func NewCreate_table_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Create_table_stmtContext { - var p = new(Create_table_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_create_table_stmt - - return p -} - -func (s *Create_table_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Create_table_stmtContext) GetRow_ROW_ID() antlr.Token { return s.row_ROW_ID } - -func (s *Create_table_stmtContext) SetRow_ROW_ID(v antlr.Token) { s.row_ROW_ID = v } - -func (s *Create_table_stmtContext) CREATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCREATE_, 0) -} - -func (s *Create_table_stmtContext) TABLE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTABLE_, 0) -} - -func (s *Create_table_stmtContext) Table_name() ITable_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *Create_table_stmtContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Create_table_stmtContext) AllColumn_def() []IColumn_defContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_defContext); ok { - len++ - } - } - - tst := make([]IColumn_defContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_defContext); ok { - tst[i] = t.(IColumn_defContext) - i++ - } - } - - return tst -} - -func (s *Create_table_stmtContext) Column_def(i int) IColumn_defContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_defContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_defContext) -} - -func (s *Create_table_stmtContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Create_table_stmtContext) AS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, 0) -} - -func (s *Create_table_stmtContext) Select_stmt() ISelect_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISelect_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISelect_stmtContext) -} - -func (s *Create_table_stmtContext) IF_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIF_, 0) -} - -func (s *Create_table_stmtContext) NOT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNOT_, 0) -} - -func (s *Create_table_stmtContext) EXISTS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXISTS_, 0) -} - -func (s *Create_table_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = 
ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Create_table_stmtContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Create_table_stmtContext) TEMP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTEMP_, 0) -} - -func (s *Create_table_stmtContext) TEMPORARY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTEMPORARY_, 0) -} - -func (s *Create_table_stmtContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Create_table_stmtContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Create_table_stmtContext) AllTable_constraint() []ITable_constraintContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ITable_constraintContext); ok { - len++ - } - } - - tst := make([]ITable_constraintContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ITable_constraintContext); ok { - tst[i] = t.(ITable_constraintContext) - i++ - } - } - - return tst -} - -func (s *Create_table_stmtContext) Table_constraint(i int) ITable_constraintContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_constraintContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ITable_constraintContext) -} - -func (s *Create_table_stmtContext) WITHOUT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWITHOUT_, 0) -} - -func (s *Create_table_stmtContext) IDENTIFIER() antlr.TerminalNode { - return s.GetToken(SQLiteParserIDENTIFIER, 0) -} - -func (s *Create_table_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Create_table_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Create_table_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCreate_table_stmt(s) - } -} - -func (s *Create_table_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitCreate_table_stmt(s) - } -} - -func (p *SQLiteParser) Create_table_stmt() (localctx ICreate_table_stmtContext) { - this := p - _ = this - - localctx = NewCreate_table_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 26, SQLiteParserRULE_create_table_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - var _alt int - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(418) - p.Match(SQLiteParserCREATE_) - } - p.SetState(420) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_ { - { - p.SetState(419) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - } - { - p.SetState(422) - p.Match(SQLiteParserTABLE_) - } - p.SetState(426) - p.GetErrorHandler().Sync(p) - - if 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 34, p.GetParserRuleContext()) == 1 { - { - p.SetState(423) - p.Match(SQLiteParserIF_) - } - { - p.SetState(424) - p.Match(SQLiteParserNOT_) - } - { - p.SetState(425) - p.Match(SQLiteParserEXISTS_) - } - - } - p.SetState(431) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 35, p.GetParserRuleContext()) == 1 { - { - p.SetState(428) - p.Schema_name() - } - { - p.SetState(429) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(433) - p.Table_name() - } - p.SetState(457) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserOPEN_PAR: - { - p.SetState(434) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(435) - p.Column_def() - } - p.SetState(440) - p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 36, p.GetParserRuleContext()) - - for _alt != 1 && _alt != antlr.ATNInvalidAltNumber { - if _alt == 1+1 { - { - p.SetState(436) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(437) - p.Column_def() - } - - } - p.SetState(442) - p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 36, p.GetParserRuleContext()) - } - p.SetState(447) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(443) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(444) - p.Table_constraint() - } - - p.SetState(449) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(450) - p.Match(SQLiteParserCLOSE_PAR) - } - p.SetState(453) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWITHOUT_ { - { - p.SetState(451) - p.Match(SQLiteParserWITHOUT_) - } - { - p.SetState(452) - - var _m = p.Match(SQLiteParserIDENTIFIER) - - localctx.(*Create_table_stmtContext).row_ROW_ID = _m - } - - } - - case SQLiteParserAS_: - { - p.SetState(455) - p.Match(SQLiteParserAS_) - } - { - p.SetState(456) - p.Select_stmt() - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - return localctx -} - -// IColumn_defContext is an interface to support dynamic dispatch. -type IColumn_defContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Column_name() IColumn_nameContext - Type_name() IType_nameContext - AllColumn_constraint() []IColumn_constraintContext - Column_constraint(i int) IColumn_constraintContext - - // IsColumn_defContext differentiates from other interfaces. 
- IsColumn_defContext() -} - -type Column_defContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyColumn_defContext() *Column_defContext { - var p = new(Column_defContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_column_def - return p -} - -func (*Column_defContext) IsColumn_defContext() {} - -func NewColumn_defContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Column_defContext { - var p = new(Column_defContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_column_def - - return p -} - -func (s *Column_defContext) GetParser() antlr.Parser { return s.parser } - -func (s *Column_defContext) Column_name() IColumn_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Column_defContext) Type_name() IType_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IType_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IType_nameContext) -} - -func (s *Column_defContext) AllColumn_constraint() []IColumn_constraintContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_constraintContext); ok { - len++ - } - } - - tst := make([]IColumn_constraintContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_constraintContext); ok { - tst[i] = t.(IColumn_constraintContext) - i++ - } - } - - return tst -} - -func (s *Column_defContext) Column_constraint(i int) IColumn_constraintContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_constraintContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_constraintContext) -} - -func (s *Column_defContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Column_defContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Column_defContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterColumn_def(s) - } -} - -func (s *Column_defContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitColumn_def(s) - } -} - -func (p *SQLiteParser) Column_def() (localctx IColumn_defContext) { - this := p - _ = this - - localctx = NewColumn_defContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 28, SQLiteParserRULE_column_def) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(459) - p.Column_name() - } - p.SetState(461) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 40, p.GetParserRuleContext()) == 1 { - { - p.SetState(460) - p.Type_name() - 
} - - } - p.SetState(466) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for ((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&72673329139417088) != 0) || ((int64((_la-102)) & ^0x3f) == 0 && ((int64(1)<<(_la-102))&274877941765) != 0) || _la == SQLiteParserGENERATED_ { - { - p.SetState(463) - p.Column_constraint() - } - - p.SetState(468) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - return localctx -} - -// IType_nameContext is an interface to support dynamic dispatch. -type IType_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - AllName() []INameContext - Name(i int) INameContext - OPEN_PAR() antlr.TerminalNode - AllSigned_number() []ISigned_numberContext - Signed_number(i int) ISigned_numberContext - CLOSE_PAR() antlr.TerminalNode - COMMA() antlr.TerminalNode - - // IsType_nameContext differentiates from other interfaces. - IsType_nameContext() -} - -type Type_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyType_nameContext() *Type_nameContext { - var p = new(Type_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_type_name - return p -} - -func (*Type_nameContext) IsType_nameContext() {} - -func NewType_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Type_nameContext { - var p = new(Type_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_type_name - - return p -} - -func (s *Type_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Type_nameContext) AllName() []INameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(INameContext); ok { - len++ - } - } - - tst := make([]INameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(INameContext); ok { - tst[i] = t.(INameContext) - i++ - } - } - - return tst -} - -func (s *Type_nameContext) Name(i int) INameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(INameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(INameContext) -} - -func (s *Type_nameContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Type_nameContext) AllSigned_number() []ISigned_numberContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ISigned_numberContext); ok { - len++ - } - } - - tst := make([]ISigned_numberContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ISigned_numberContext); ok { - tst[i] = t.(ISigned_numberContext) - i++ - } - } - - return tst -} - -func (s *Type_nameContext) Signed_number(i int) ISigned_numberContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISigned_numberContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ISigned_numberContext) -} - -func (s *Type_nameContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Type_nameContext) COMMA() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, 0) -} - -func (s *Type_nameContext) GetRuleContext() 
antlr.RuleContext { - return s -} - -func (s *Type_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Type_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterType_name(s) - } -} - -func (s *Type_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitType_name(s) - } -} - -func (p *SQLiteParser) Type_name() (localctx IType_nameContext) { - this := p - _ = this - - localctx = NewType_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 30, SQLiteParserRULE_type_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - var _alt int - - p.EnterOuterAlt(localctx, 1) - p.SetState(470) - p.GetErrorHandler().Sync(p) - _alt = 1 + 1 - for ok := true; ok; ok = _alt != 1 && _alt != antlr.ATNInvalidAltNumber { - switch _alt { - case 1 + 1: - { - p.SetState(469) - p.Name() - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - p.SetState(472) - p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 42, p.GetParserRuleContext()) - } - p.SetState(484) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 43, p.GetParserRuleContext()) == 1 { - { - p.SetState(474) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(475) - p.Signed_number() - } - { - p.SetState(476) - p.Match(SQLiteParserCLOSE_PAR) - } - - } else if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 43, p.GetParserRuleContext()) == 2 { - { - p.SetState(478) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(479) - p.Signed_number() - } - { - p.SetState(480) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(481) - p.Signed_number() - } - { - p.SetState(482) - p.Match(SQLiteParserCLOSE_PAR) - } - - } - - return localctx -} - -// IColumn_constraintContext is an interface to support dynamic dispatch. -type IColumn_constraintContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - CHECK_() antlr.TerminalNode - OPEN_PAR() antlr.TerminalNode - Expr() IExprContext - CLOSE_PAR() antlr.TerminalNode - DEFAULT_() antlr.TerminalNode - COLLATE_() antlr.TerminalNode - Collation_name() ICollation_nameContext - Foreign_key_clause() IForeign_key_clauseContext - AS_() antlr.TerminalNode - CONSTRAINT_() antlr.TerminalNode - Name() INameContext - PRIMARY_() antlr.TerminalNode - KEY_() antlr.TerminalNode - NULL_() antlr.TerminalNode - UNIQUE_() antlr.TerminalNode - Signed_number() ISigned_numberContext - Literal_value() ILiteral_valueContext - Conflict_clause() IConflict_clauseContext - GENERATED_() antlr.TerminalNode - ALWAYS_() antlr.TerminalNode - STORED_() antlr.TerminalNode - VIRTUAL_() antlr.TerminalNode - Asc_desc() IAsc_descContext - AUTOINCREMENT_() antlr.TerminalNode - NOT_() antlr.TerminalNode - - // IsColumn_constraintContext differentiates from other interfaces. 
- IsColumn_constraintContext() -} - -type Column_constraintContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyColumn_constraintContext() *Column_constraintContext { - var p = new(Column_constraintContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_column_constraint - return p -} - -func (*Column_constraintContext) IsColumn_constraintContext() {} - -func NewColumn_constraintContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Column_constraintContext { - var p = new(Column_constraintContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_column_constraint - - return p -} - -func (s *Column_constraintContext) GetParser() antlr.Parser { return s.parser } - -func (s *Column_constraintContext) CHECK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCHECK_, 0) -} - -func (s *Column_constraintContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Column_constraintContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Column_constraintContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Column_constraintContext) DEFAULT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDEFAULT_, 0) -} - -func (s *Column_constraintContext) COLLATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOLLATE_, 0) -} - -func (s *Column_constraintContext) Collation_name() ICollation_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICollation_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICollation_nameContext) -} - -func (s *Column_constraintContext) Foreign_key_clause() IForeign_key_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IForeign_key_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IForeign_key_clauseContext) -} - -func (s *Column_constraintContext) AS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, 0) -} - -func (s *Column_constraintContext) CONSTRAINT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCONSTRAINT_, 0) -} - -func (s *Column_constraintContext) Name() INameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(INameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(INameContext) -} - -func (s *Column_constraintContext) PRIMARY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPRIMARY_, 0) -} - -func (s *Column_constraintContext) KEY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserKEY_, 0) -} - -func (s *Column_constraintContext) NULL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNULL_, 0) -} - -func (s *Column_constraintContext) UNIQUE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUNIQUE_, 0) -} - -func (s *Column_constraintContext) Signed_number() ISigned_numberContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISigned_numberContext); ok { - t = 
ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISigned_numberContext) -} - -func (s *Column_constraintContext) Literal_value() ILiteral_valueContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ILiteral_valueContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ILiteral_valueContext) -} - -func (s *Column_constraintContext) Conflict_clause() IConflict_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IConflict_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IConflict_clauseContext) -} - -func (s *Column_constraintContext) GENERATED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserGENERATED_, 0) -} - -func (s *Column_constraintContext) ALWAYS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserALWAYS_, 0) -} - -func (s *Column_constraintContext) STORED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserSTORED_, 0) -} - -func (s *Column_constraintContext) VIRTUAL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserVIRTUAL_, 0) -} - -func (s *Column_constraintContext) Asc_desc() IAsc_descContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAsc_descContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAsc_descContext) -} - -func (s *Column_constraintContext) AUTOINCREMENT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAUTOINCREMENT_, 0) -} - -func (s *Column_constraintContext) NOT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNOT_, 0) -} - -func (s *Column_constraintContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Column_constraintContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Column_constraintContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterColumn_constraint(s) - } -} - -func (s *Column_constraintContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitColumn_constraint(s) - } -} - -func (p *SQLiteParser) Column_constraint() (localctx IColumn_constraintContext) { - this := p - _ = this - - localctx = NewColumn_constraintContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 32, SQLiteParserRULE_column_constraint) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(488) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserCONSTRAINT_ { - { - p.SetState(486) - p.Match(SQLiteParserCONSTRAINT_) - } - { - p.SetState(487) - p.Name() - } - - } - p.SetState(539) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserPRIMARY_: - { - p.SetState(490) - p.Match(SQLiteParserPRIMARY_) - } - { - p.SetState(491) - p.Match(SQLiteParserKEY_) - } - p.SetState(493) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserASC_ || _la == 
SQLiteParserDESC_ { - { - p.SetState(492) - p.Asc_desc() - } - - } - p.SetState(496) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserON_ { - { - p.SetState(495) - p.Conflict_clause() - } - - } - p.SetState(499) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserAUTOINCREMENT_ { - { - p.SetState(498) - p.Match(SQLiteParserAUTOINCREMENT_) - } - - } - - case SQLiteParserNOT_, SQLiteParserNULL_, SQLiteParserUNIQUE_: - p.SetState(506) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserNOT_, SQLiteParserNULL_: - p.SetState(502) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserNOT_ { - { - p.SetState(501) - p.Match(SQLiteParserNOT_) - } - - } - { - p.SetState(504) - p.Match(SQLiteParserNULL_) - } - - case SQLiteParserUNIQUE_: - { - p.SetState(505) - p.Match(SQLiteParserUNIQUE_) - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - p.SetState(509) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserON_ { - { - p.SetState(508) - p.Conflict_clause() - } - - } - - case SQLiteParserCHECK_: - { - p.SetState(511) - p.Match(SQLiteParserCHECK_) - } - { - p.SetState(512) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(513) - p.expr(0) - } - { - p.SetState(514) - p.Match(SQLiteParserCLOSE_PAR) - } - - case SQLiteParserDEFAULT_: - { - p.SetState(516) - p.Match(SQLiteParserDEFAULT_) - } - p.SetState(523) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 51, p.GetParserRuleContext()) { - case 1: - { - p.SetState(517) - p.Signed_number() - } - - case 2: - { - p.SetState(518) - p.Literal_value() - } - - case 3: - { - p.SetState(519) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(520) - p.expr(0) - } - { - p.SetState(521) - p.Match(SQLiteParserCLOSE_PAR) - } - - } - - case SQLiteParserCOLLATE_: - { - p.SetState(525) - p.Match(SQLiteParserCOLLATE_) - } - { - p.SetState(526) - p.Collation_name() - } - - case SQLiteParserREFERENCES_: - { - p.SetState(527) - p.Foreign_key_clause() - } - - case SQLiteParserAS_, SQLiteParserGENERATED_: - p.SetState(530) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserGENERATED_ { - { - p.SetState(528) - p.Match(SQLiteParserGENERATED_) - } - { - p.SetState(529) - p.Match(SQLiteParserALWAYS_) - } - - } - { - p.SetState(532) - p.Match(SQLiteParserAS_) - } - { - p.SetState(533) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(534) - p.expr(0) - } - { - p.SetState(535) - p.Match(SQLiteParserCLOSE_PAR) - } - p.SetState(537) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserVIRTUAL_ || _la == SQLiteParserSTORED_ { - { - p.SetState(536) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserVIRTUAL_ || _la == SQLiteParserSTORED_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - return localctx -} - -// ISigned_numberContext is an interface to support dynamic dispatch. -type ISigned_numberContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
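Every optional child in these generated contexts is exposed through the same getter shape: scan `GetChildren()`, type-assert against the wanted interface, and return `nil` when that child was never parsed, so callers can probe which grammar alternative actually matched. Below is a minimal, dependency-free sketch of that pattern; `node`, `exprNode`, and `ruleCtx` are illustrative stand-ins, not the ANTLR runtime types.

```go
package main

import "fmt"

// Stand-in parse-tree types (not the ANTLR runtime's interfaces).
type node interface{ Text() string }

type exprNode struct{ text string }

func (e exprNode) Text() string { return e.text }

type tokenNode struct{ text string }

func (t tokenNode) Text() string { return t.text }

type ruleCtx struct{ children []node }

// Expr mirrors getters like Column_constraintContext.Expr(): return the
// first child of the wanted type, or nil when the optional child was
// never parsed.
func (c ruleCtx) Expr() *exprNode {
	for _, ch := range c.children {
		if e, ok := ch.(exprNode); ok {
			return &e
		}
	}
	return nil // e.g. the CHECK(expr) alternative was not taken
}

func main() {
	check := ruleCtx{children: []node{tokenNode{"CHECK"}, exprNode{"x > 0"}}}
	unique := ruleCtx{children: []node{tokenNode{"UNIQUE"}}}
	fmt.Println(check.Expr().Text())  // x > 0
	fmt.Println(unique.Expr() == nil) // true
}
```

Returning nil instead of an error keeps these getters cheap to call speculatively, which is how downstream code distinguishes, say, a CHECK constraint from a UNIQUE one.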
- GetParser() antlr.Parser - - // Getter signatures - NUMERIC_LITERAL() antlr.TerminalNode - PLUS() antlr.TerminalNode - MINUS() antlr.TerminalNode - - // IsSigned_numberContext differentiates from other interfaces. - IsSigned_numberContext() -} - -type Signed_numberContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptySigned_numberContext() *Signed_numberContext { - var p = new(Signed_numberContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_signed_number - return p -} - -func (*Signed_numberContext) IsSigned_numberContext() {} - -func NewSigned_numberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Signed_numberContext { - var p = new(Signed_numberContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_signed_number - - return p -} - -func (s *Signed_numberContext) GetParser() antlr.Parser { return s.parser } - -func (s *Signed_numberContext) NUMERIC_LITERAL() antlr.TerminalNode { - return s.GetToken(SQLiteParserNUMERIC_LITERAL, 0) -} - -func (s *Signed_numberContext) PLUS() antlr.TerminalNode { - return s.GetToken(SQLiteParserPLUS, 0) -} - -func (s *Signed_numberContext) MINUS() antlr.TerminalNode { - return s.GetToken(SQLiteParserMINUS, 0) -} - -func (s *Signed_numberContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Signed_numberContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Signed_numberContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterSigned_number(s) - } -} - -func (s *Signed_numberContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitSigned_number(s) - } -} - -func (p *SQLiteParser) Signed_number() (localctx ISigned_numberContext) { - this := p - _ = this - - localctx = NewSigned_numberContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 34, SQLiteParserRULE_signed_number) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(542) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserPLUS || _la == SQLiteParserMINUS { - { - p.SetState(541) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserPLUS || _la == SQLiteParserMINUS) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - } - { - p.SetState(544) - p.Match(SQLiteParserNUMERIC_LITERAL) - } - - return localctx -} - -// ITable_constraintContext is an interface to support dynamic dispatch. -type ITable_constraintContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
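The Signed_number rule above reduces to `(PLUS | MINUS)? NUMERIC_LITERAL`: the generated code tests one token of lookahead against the sign set, consumes the sign if present, then requires the literal. A hand-rolled sketch of the same shape over a plain string, with `strconv` standing in for the lexer's NUMERIC_LITERAL token:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSignedNumber is a hand-rolled analogue of the Signed_number rule,
// not the runtime path: (PLUS | MINUS)? NUMERIC_LITERAL.
func parseSignedNumber(input string) (float64, error) {
	s := strings.TrimSpace(input)
	sign := 1.0
	// Optional sign: the analogue of the _la == PLUS || _la == MINUS test.
	if len(s) > 0 && (s[0] == '+' || s[0] == '-') {
		if s[0] == '-' {
			sign = -1
		}
		s = s[1:]
	}
	// Mandatory literal: the analogue of p.Match(SQLiteParserNUMERIC_LITERAL).
	v, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, fmt.Errorf("expected numeric literal: %w", err)
	}
	return sign * v, nil
}

func main() {
	for _, in := range []string{"42", "-3.5", "+7"} {
		v, err := parseSignedNumber(in)
		fmt.Println(in, "=>", v, err)
	}
}
```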
- GetParser() antlr.Parser - - // Getter signatures - OPEN_PAR() antlr.TerminalNode - AllIndexed_column() []IIndexed_columnContext - Indexed_column(i int) IIndexed_columnContext - CLOSE_PAR() antlr.TerminalNode - CHECK_() antlr.TerminalNode - Expr() IExprContext - FOREIGN_() antlr.TerminalNode - KEY_() antlr.TerminalNode - AllColumn_name() []IColumn_nameContext - Column_name(i int) IColumn_nameContext - Foreign_key_clause() IForeign_key_clauseContext - CONSTRAINT_() antlr.TerminalNode - Name() INameContext - PRIMARY_() antlr.TerminalNode - UNIQUE_() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - Conflict_clause() IConflict_clauseContext - - // IsTable_constraintContext differentiates from other interfaces. - IsTable_constraintContext() -} - -type Table_constraintContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyTable_constraintContext() *Table_constraintContext { - var p = new(Table_constraintContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_table_constraint - return p -} - -func (*Table_constraintContext) IsTable_constraintContext() {} - -func NewTable_constraintContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_constraintContext { - var p = new(Table_constraintContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_table_constraint - - return p -} - -func (s *Table_constraintContext) GetParser() antlr.Parser { return s.parser } - -func (s *Table_constraintContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Table_constraintContext) AllIndexed_column() []IIndexed_columnContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IIndexed_columnContext); ok { - len++ - } - } - - tst := make([]IIndexed_columnContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IIndexed_columnContext); ok { - tst[i] = t.(IIndexed_columnContext) - i++ - } - } - - return tst -} - -func (s *Table_constraintContext) Indexed_column(i int) IIndexed_columnContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IIndexed_columnContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IIndexed_columnContext) -} - -func (s *Table_constraintContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Table_constraintContext) CHECK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCHECK_, 0) -} - -func (s *Table_constraintContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Table_constraintContext) FOREIGN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFOREIGN_, 0) -} - -func (s *Table_constraintContext) KEY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserKEY_, 0) -} - -func (s *Table_constraintContext) AllColumn_name() []IColumn_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_nameContext); ok { - len++ - } - } - - tst := make([]IColumn_nameContext, len) - i := 0 - for _, ctx := range children 
{ - if t, ok := ctx.(IColumn_nameContext); ok { - tst[i] = t.(IColumn_nameContext) - i++ - } - } - - return tst -} - -func (s *Table_constraintContext) Column_name(i int) IColumn_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Table_constraintContext) Foreign_key_clause() IForeign_key_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IForeign_key_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IForeign_key_clauseContext) -} - -func (s *Table_constraintContext) CONSTRAINT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCONSTRAINT_, 0) -} - -func (s *Table_constraintContext) Name() INameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(INameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(INameContext) -} - -func (s *Table_constraintContext) PRIMARY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPRIMARY_, 0) -} - -func (s *Table_constraintContext) UNIQUE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUNIQUE_, 0) -} - -func (s *Table_constraintContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Table_constraintContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Table_constraintContext) Conflict_clause() IConflict_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IConflict_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IConflict_clauseContext) -} - -func (s *Table_constraintContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Table_constraintContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Table_constraintContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterTable_constraint(s) - } -} - -func (s *Table_constraintContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitTable_constraint(s) - } -} - -func (p *SQLiteParser) Table_constraint() (localctx ITable_constraintContext) { - this := p - _ = this - - localctx = NewTable_constraintContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 36, SQLiteParserRULE_table_constraint) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(548) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserCONSTRAINT_ { - { - p.SetState(546) - p.Match(SQLiteParserCONSTRAINT_) - } - { - p.SetState(547) - p.Name() - } - - } - p.SetState(587) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserPRIMARY_, SQLiteParserUNIQUE_: - 
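// PRIMARY KEY and UNIQUE table constraints share this alternative: an inner
// switch picks the keyword, then both parse the same parenthesized
// indexed-column list and an optional ON CONFLICT clause.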
p.SetState(553) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserPRIMARY_: - { - p.SetState(550) - p.Match(SQLiteParserPRIMARY_) - } - { - p.SetState(551) - p.Match(SQLiteParserKEY_) - } - - case SQLiteParserUNIQUE_: - { - p.SetState(552) - p.Match(SQLiteParserUNIQUE_) - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - { - p.SetState(555) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(556) - p.Indexed_column() - } - p.SetState(561) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(557) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(558) - p.Indexed_column() - } - - p.SetState(563) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(564) - p.Match(SQLiteParserCLOSE_PAR) - } - p.SetState(566) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserON_ { - { - p.SetState(565) - p.Conflict_clause() - } - - } - - case SQLiteParserCHECK_: - { - p.SetState(568) - p.Match(SQLiteParserCHECK_) - } - { - p.SetState(569) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(570) - p.expr(0) - } - { - p.SetState(571) - p.Match(SQLiteParserCLOSE_PAR) - } - - case SQLiteParserFOREIGN_: - { - p.SetState(573) - p.Match(SQLiteParserFOREIGN_) - } - { - p.SetState(574) - p.Match(SQLiteParserKEY_) - } - { - p.SetState(575) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(576) - p.Column_name() - } - p.SetState(581) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(577) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(578) - p.Column_name() - } - - p.SetState(583) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(584) - p.Match(SQLiteParserCLOSE_PAR) - } - { - p.SetState(585) - p.Foreign_key_clause() - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - return localctx -} - -// IForeign_key_clauseContext is an interface to support dynamic dispatch. -type IForeign_key_clauseContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
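Both list forms in Table_constraint, indexed columns for PRIMARY KEY/UNIQUE and column names for FOREIGN KEY, use the same generated loop: match one mandatory element, then keep consuming while the lookahead is COMMA. A simplified sketch of that `elem (',' elem)*` shape over a pre-split token slice rather than an ANTLR token stream (`parseParenList` is a hypothetical helper):

```go
package main

import (
	"fmt"
	"strings"
)

// parseParenList sketches "( elem ( ',' elem )* )": one mandatory
// element, then keep consuming while the lookahead is a comma, the same
// loop shape as the generated COMMA loops above.
func parseParenList(tokens []string) ([]string, error) {
	if len(tokens) < 3 || tokens[0] != "(" {
		return nil, fmt.Errorf("expected '( elem ... )'")
	}
	elems := []string{tokens[1]} // first element is mandatory
	i := 2
	for i+1 < len(tokens) && tokens[i] == "," { // for _la == COMMA { ... }
		elems = append(elems, tokens[i+1])
		i += 2
	}
	if i >= len(tokens) || tokens[i] != ")" {
		return nil, fmt.Errorf("expected ')' after %d element(s)", len(elems))
	}
	return elems, nil
}

func main() {
	toks := strings.Split("( a , b , c )", " ")
	elems, err := parseParenList(toks)
	fmt.Println(elems, err) // [a b c] <nil>
}
```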
- GetParser() antlr.Parser - - // Getter signatures - REFERENCES_() antlr.TerminalNode - Foreign_table() IForeign_tableContext - OPEN_PAR() antlr.TerminalNode - AllColumn_name() []IColumn_nameContext - Column_name(i int) IColumn_nameContext - CLOSE_PAR() antlr.TerminalNode - AllON_() []antlr.TerminalNode - ON_(i int) antlr.TerminalNode - AllMATCH_() []antlr.TerminalNode - MATCH_(i int) antlr.TerminalNode - AllName() []INameContext - Name(i int) INameContext - DEFERRABLE_() antlr.TerminalNode - AllDELETE_() []antlr.TerminalNode - DELETE_(i int) antlr.TerminalNode - AllUPDATE_() []antlr.TerminalNode - UPDATE_(i int) antlr.TerminalNode - AllSET_() []antlr.TerminalNode - SET_(i int) antlr.TerminalNode - AllCASCADE_() []antlr.TerminalNode - CASCADE_(i int) antlr.TerminalNode - AllRESTRICT_() []antlr.TerminalNode - RESTRICT_(i int) antlr.TerminalNode - AllNO_() []antlr.TerminalNode - NO_(i int) antlr.TerminalNode - AllACTION_() []antlr.TerminalNode - ACTION_(i int) antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - AllNULL_() []antlr.TerminalNode - NULL_(i int) antlr.TerminalNode - AllDEFAULT_() []antlr.TerminalNode - DEFAULT_(i int) antlr.TerminalNode - NOT_() antlr.TerminalNode - INITIALLY_() antlr.TerminalNode - DEFERRED_() antlr.TerminalNode - IMMEDIATE_() antlr.TerminalNode - - // IsForeign_key_clauseContext differentiates from other interfaces. - IsForeign_key_clauseContext() -} - -type Foreign_key_clauseContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyForeign_key_clauseContext() *Foreign_key_clauseContext { - var p = new(Foreign_key_clauseContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_foreign_key_clause - return p -} - -func (*Foreign_key_clauseContext) IsForeign_key_clauseContext() {} - -func NewForeign_key_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Foreign_key_clauseContext { - var p = new(Foreign_key_clauseContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_foreign_key_clause - - return p -} - -func (s *Foreign_key_clauseContext) GetParser() antlr.Parser { return s.parser } - -func (s *Foreign_key_clauseContext) REFERENCES_() antlr.TerminalNode { - return s.GetToken(SQLiteParserREFERENCES_, 0) -} - -func (s *Foreign_key_clauseContext) Foreign_table() IForeign_tableContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IForeign_tableContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IForeign_tableContext) -} - -func (s *Foreign_key_clauseContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Foreign_key_clauseContext) AllColumn_name() []IColumn_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_nameContext); ok { - len++ - } - } - - tst := make([]IColumn_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_nameContext); ok { - tst[i] = t.(IColumn_nameContext) - i++ - } - } - - return tst -} - -func (s *Foreign_key_clauseContext) Column_name(i int) IColumn_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil 
{ - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Foreign_key_clauseContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Foreign_key_clauseContext) AllON_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserON_) -} - -func (s *Foreign_key_clauseContext) ON_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserON_, i) -} - -func (s *Foreign_key_clauseContext) AllMATCH_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserMATCH_) -} - -func (s *Foreign_key_clauseContext) MATCH_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserMATCH_, i) -} - -func (s *Foreign_key_clauseContext) AllName() []INameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(INameContext); ok { - len++ - } - } - - tst := make([]INameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(INameContext); ok { - tst[i] = t.(INameContext) - i++ - } - } - - return tst -} - -func (s *Foreign_key_clauseContext) Name(i int) INameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(INameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(INameContext) -} - -func (s *Foreign_key_clauseContext) DEFERRABLE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDEFERRABLE_, 0) -} - -func (s *Foreign_key_clauseContext) AllDELETE_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserDELETE_) -} - -func (s *Foreign_key_clauseContext) DELETE_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserDELETE_, i) -} - -func (s *Foreign_key_clauseContext) AllUPDATE_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserUPDATE_) -} - -func (s *Foreign_key_clauseContext) UPDATE_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserUPDATE_, i) -} - -func (s *Foreign_key_clauseContext) AllSET_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserSET_) -} - -func (s *Foreign_key_clauseContext) SET_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserSET_, i) -} - -func (s *Foreign_key_clauseContext) AllCASCADE_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCASCADE_) -} - -func (s *Foreign_key_clauseContext) CASCADE_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCASCADE_, i) -} - -func (s *Foreign_key_clauseContext) AllRESTRICT_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserRESTRICT_) -} - -func (s *Foreign_key_clauseContext) RESTRICT_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserRESTRICT_, i) -} - -func (s *Foreign_key_clauseContext) AllNO_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserNO_) -} - -func (s *Foreign_key_clauseContext) NO_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserNO_, i) -} - -func (s *Foreign_key_clauseContext) AllACTION_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserACTION_) -} - -func (s *Foreign_key_clauseContext) ACTION_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserACTION_, i) -} - -func (s *Foreign_key_clauseContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Foreign_key_clauseContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Foreign_key_clauseContext) AllNULL_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserNULL_) -} - -func (s *Foreign_key_clauseContext) NULL_(i int) antlr.TerminalNode 
{ - return s.GetToken(SQLiteParserNULL_, i) -} - -func (s *Foreign_key_clauseContext) AllDEFAULT_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserDEFAULT_) -} - -func (s *Foreign_key_clauseContext) DEFAULT_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserDEFAULT_, i) -} - -func (s *Foreign_key_clauseContext) NOT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNOT_, 0) -} - -func (s *Foreign_key_clauseContext) INITIALLY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINITIALLY_, 0) -} - -func (s *Foreign_key_clauseContext) DEFERRED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDEFERRED_, 0) -} - -func (s *Foreign_key_clauseContext) IMMEDIATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIMMEDIATE_, 0) -} - -func (s *Foreign_key_clauseContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Foreign_key_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Foreign_key_clauseContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterForeign_key_clause(s) - } -} - -func (s *Foreign_key_clauseContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitForeign_key_clause(s) - } -} - -func (p *SQLiteParser) Foreign_key_clause() (localctx IForeign_key_clauseContext) { - this := p - _ = this - - localctx = NewForeign_key_clauseContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 38, SQLiteParserRULE_foreign_key_clause) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(589) - p.Match(SQLiteParserREFERENCES_) - } - { - p.SetState(590) - p.Foreign_table() - } - p.SetState(602) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserOPEN_PAR { - { - p.SetState(591) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(592) - p.Column_name() - } - p.SetState(597) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(593) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(594) - p.Column_name() - } - - p.SetState(599) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(600) - p.Match(SQLiteParserCLOSE_PAR) - } - - } - p.SetState(618) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserMATCH_ || _la == SQLiteParserON_ { - p.SetState(616) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserON_: - { - p.SetState(604) - p.Match(SQLiteParserON_) - } - { - p.SetState(605) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserDELETE_ || _la == SQLiteParserUPDATE_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - p.SetState(612) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserSET_: - { - p.SetState(606) - p.Match(SQLiteParserSET_) - } - { - p.SetState(607) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserDEFAULT_ || _la == SQLiteParserNULL_) { - 
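// Lookahead after "ON DELETE/UPDATE SET" is neither NULL nor DEFAULT:
// hand the token to the error strategy for in-place recovery instead of
// consuming it.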
p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - case SQLiteParserCASCADE_: - { - p.SetState(608) - p.Match(SQLiteParserCASCADE_) - } - - case SQLiteParserRESTRICT_: - { - p.SetState(609) - p.Match(SQLiteParserRESTRICT_) - } - - case SQLiteParserNO_: - { - p.SetState(610) - p.Match(SQLiteParserNO_) - } - { - p.SetState(611) - p.Match(SQLiteParserACTION_) - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - case SQLiteParserMATCH_: - { - p.SetState(614) - p.Match(SQLiteParserMATCH_) - } - { - p.SetState(615) - p.Name() - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - p.SetState(620) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - p.SetState(629) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 69, p.GetParserRuleContext()) == 1 { - p.SetState(622) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserNOT_ { - { - p.SetState(621) - p.Match(SQLiteParserNOT_) - } - - } - { - p.SetState(624) - p.Match(SQLiteParserDEFERRABLE_) - } - p.SetState(627) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserINITIALLY_ { - { - p.SetState(625) - p.Match(SQLiteParserINITIALLY_) - } - { - p.SetState(626) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserDEFERRED_ || _la == SQLiteParserIMMEDIATE_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - } - - } - - return localctx -} - -// IConflict_clauseContext is an interface to support dynamic dispatch. -type IConflict_clauseContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - ON_() antlr.TerminalNode - CONFLICT_() antlr.TerminalNode - ROLLBACK_() antlr.TerminalNode - ABORT_() antlr.TerminalNode - FAIL_() antlr.TerminalNode - IGNORE_() antlr.TerminalNode - REPLACE_() antlr.TerminalNode - - // IsConflict_clauseContext differentiates from other interfaces. 
- IsConflict_clauseContext() -} - -type Conflict_clauseContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyConflict_clauseContext() *Conflict_clauseContext { - var p = new(Conflict_clauseContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_conflict_clause - return p -} - -func (*Conflict_clauseContext) IsConflict_clauseContext() {} - -func NewConflict_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Conflict_clauseContext { - var p = new(Conflict_clauseContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_conflict_clause - - return p -} - -func (s *Conflict_clauseContext) GetParser() antlr.Parser { return s.parser } - -func (s *Conflict_clauseContext) ON_() antlr.TerminalNode { - return s.GetToken(SQLiteParserON_, 0) -} - -func (s *Conflict_clauseContext) CONFLICT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCONFLICT_, 0) -} - -func (s *Conflict_clauseContext) ROLLBACK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROLLBACK_, 0) -} - -func (s *Conflict_clauseContext) ABORT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserABORT_, 0) -} - -func (s *Conflict_clauseContext) FAIL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFAIL_, 0) -} - -func (s *Conflict_clauseContext) IGNORE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIGNORE_, 0) -} - -func (s *Conflict_clauseContext) REPLACE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserREPLACE_, 0) -} - -func (s *Conflict_clauseContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Conflict_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Conflict_clauseContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterConflict_clause(s) - } -} - -func (s *Conflict_clauseContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitConflict_clause(s) - } -} - -func (p *SQLiteParser) Conflict_clause() (localctx IConflict_clauseContext) { - this := p - _ = this - - localctx = NewConflict_clauseContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 40, SQLiteParserRULE_conflict_clause) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(631) - p.Match(SQLiteParserON_) - } - { - p.SetState(632) - p.Match(SQLiteParserCONFLICT_) - } - { - p.SetState(633) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserABORT_ || ((int64((_la-72)) & ^0x3f) == 0 && ((int64(1)<<(_la-72))&19140298416325121) != 0)) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - return localctx -} - -// ICreate_trigger_stmtContext is an interface to support dynamic dispatch. -type ICreate_trigger_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
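The opaque test in Conflict_clause, `(int64((_la-72)) & ^0x3f) == 0 && ((int64(1)<<(_la-72))&19140298416325121) != 0`, is how ANTLR compiles a token-set membership check into a 64-bit bitset anchored at a base token value (72 here): the first conjunct verifies that `_la-72` falls in 0..63 without branching, and the second tests that token's bit in the mask literal. A self-contained demo of the idiom; the token values below are invented, not the real SQLiteParser constants:

```go
package main

import "fmt"

// base anchors the 64-token window, as 72 does in the generated test.
const base int64 = 72

// newTokenSet builds the mask ANTLR would emit as a literal constant.
// Tokens must lie in [base, base+63].
func newTokenSet(tokens ...int64) (mask int64) {
	for _, t := range tokens {
		mask |= int64(1) << (t - base)
	}
	return mask
}

// inSet is the generated idiom: window check, then bit test. Go's &^
// (and-not) is equivalent to the generated "& ^0x3f".
func inSet(la, mask int64) bool {
	return (la-base)&^0x3f == 0 && (int64(1)<<(la-base))&mask != 0
}

func main() {
	// Pretend three conflict-action keywords were assigned these values.
	set := newTokenSet(72, 75, 90)
	fmt.Println(inSet(75, set)) // true: bit 3 is in the mask
	fmt.Println(inSet(80, set)) // false: in the window, bit not set
	fmt.Println(inSet(10, set)) // false: outside the 64-token window
}
```

Short-circuiting on the window check also keeps the shift count non-negative, which matters in Go because a negative shift panics at run time.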
- GetParser() antlr.Parser - - // Getter signatures - CREATE_() antlr.TerminalNode - TRIGGER_() antlr.TerminalNode - Trigger_name() ITrigger_nameContext - ON_() antlr.TerminalNode - Table_name() ITable_nameContext - BEGIN_() antlr.TerminalNode - END_() antlr.TerminalNode - DELETE_() antlr.TerminalNode - INSERT_() antlr.TerminalNode - UPDATE_() antlr.TerminalNode - IF_() antlr.TerminalNode - NOT_() antlr.TerminalNode - EXISTS_() antlr.TerminalNode - Schema_name() ISchema_nameContext - DOT() antlr.TerminalNode - BEFORE_() antlr.TerminalNode - AFTER_() antlr.TerminalNode - INSTEAD_() antlr.TerminalNode - AllOF_() []antlr.TerminalNode - OF_(i int) antlr.TerminalNode - FOR_() antlr.TerminalNode - EACH_() antlr.TerminalNode - ROW_() antlr.TerminalNode - WHEN_() antlr.TerminalNode - Expr() IExprContext - AllSCOL() []antlr.TerminalNode - SCOL(i int) antlr.TerminalNode - TEMP_() antlr.TerminalNode - TEMPORARY_() antlr.TerminalNode - AllColumn_name() []IColumn_nameContext - Column_name(i int) IColumn_nameContext - AllUpdate_stmt() []IUpdate_stmtContext - Update_stmt(i int) IUpdate_stmtContext - AllInsert_stmt() []IInsert_stmtContext - Insert_stmt(i int) IInsert_stmtContext - AllDelete_stmt() []IDelete_stmtContext - Delete_stmt(i int) IDelete_stmtContext - AllSelect_stmt() []ISelect_stmtContext - Select_stmt(i int) ISelect_stmtContext - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsCreate_trigger_stmtContext differentiates from other interfaces. - IsCreate_trigger_stmtContext() -} - -type Create_trigger_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyCreate_trigger_stmtContext() *Create_trigger_stmtContext { - var p = new(Create_trigger_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_create_trigger_stmt - return p -} - -func (*Create_trigger_stmtContext) IsCreate_trigger_stmtContext() {} - -func NewCreate_trigger_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Create_trigger_stmtContext { - var p = new(Create_trigger_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_create_trigger_stmt - - return p -} - -func (s *Create_trigger_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Create_trigger_stmtContext) CREATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCREATE_, 0) -} - -func (s *Create_trigger_stmtContext) TRIGGER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTRIGGER_, 0) -} - -func (s *Create_trigger_stmtContext) Trigger_name() ITrigger_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITrigger_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITrigger_nameContext) -} - -func (s *Create_trigger_stmtContext) ON_() antlr.TerminalNode { - return s.GetToken(SQLiteParserON_, 0) -} - -func (s *Create_trigger_stmtContext) Table_name() ITable_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *Create_trigger_stmtContext) BEGIN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserBEGIN_, 0) -} - -func (s *Create_trigger_stmtContext) END_() antlr.TerminalNode { - return 
s.GetToken(SQLiteParserEND_, 0) -} - -func (s *Create_trigger_stmtContext) DELETE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDELETE_, 0) -} - -func (s *Create_trigger_stmtContext) INSERT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINSERT_, 0) -} - -func (s *Create_trigger_stmtContext) UPDATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUPDATE_, 0) -} - -func (s *Create_trigger_stmtContext) IF_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIF_, 0) -} - -func (s *Create_trigger_stmtContext) NOT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNOT_, 0) -} - -func (s *Create_trigger_stmtContext) EXISTS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXISTS_, 0) -} - -func (s *Create_trigger_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Create_trigger_stmtContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Create_trigger_stmtContext) BEFORE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserBEFORE_, 0) -} - -func (s *Create_trigger_stmtContext) AFTER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAFTER_, 0) -} - -func (s *Create_trigger_stmtContext) INSTEAD_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINSTEAD_, 0) -} - -func (s *Create_trigger_stmtContext) AllOF_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserOF_) -} - -func (s *Create_trigger_stmtContext) OF_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserOF_, i) -} - -func (s *Create_trigger_stmtContext) FOR_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFOR_, 0) -} - -func (s *Create_trigger_stmtContext) EACH_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEACH_, 0) -} - -func (s *Create_trigger_stmtContext) ROW_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROW_, 0) -} - -func (s *Create_trigger_stmtContext) WHEN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWHEN_, 0) -} - -func (s *Create_trigger_stmtContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Create_trigger_stmtContext) AllSCOL() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserSCOL) -} - -func (s *Create_trigger_stmtContext) SCOL(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserSCOL, i) -} - -func (s *Create_trigger_stmtContext) TEMP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTEMP_, 0) -} - -func (s *Create_trigger_stmtContext) TEMPORARY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTEMPORARY_, 0) -} - -func (s *Create_trigger_stmtContext) AllColumn_name() []IColumn_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_nameContext); ok { - len++ - } - } - - tst := make([]IColumn_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_nameContext); ok { - tst[i] = t.(IColumn_nameContext) - i++ - } - } - - return tst -} - -func (s *Create_trigger_stmtContext) Column_name(i int) IColumn_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - if j 
== i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Create_trigger_stmtContext) AllUpdate_stmt() []IUpdate_stmtContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IUpdate_stmtContext); ok { - len++ - } - } - - tst := make([]IUpdate_stmtContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IUpdate_stmtContext); ok { - tst[i] = t.(IUpdate_stmtContext) - i++ - } - } - - return tst -} - -func (s *Create_trigger_stmtContext) Update_stmt(i int) IUpdate_stmtContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IUpdate_stmtContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IUpdate_stmtContext) -} - -func (s *Create_trigger_stmtContext) AllInsert_stmt() []IInsert_stmtContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IInsert_stmtContext); ok { - len++ - } - } - - tst := make([]IInsert_stmtContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IInsert_stmtContext); ok { - tst[i] = t.(IInsert_stmtContext) - i++ - } - } - - return tst -} - -func (s *Create_trigger_stmtContext) Insert_stmt(i int) IInsert_stmtContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IInsert_stmtContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IInsert_stmtContext) -} - -func (s *Create_trigger_stmtContext) AllDelete_stmt() []IDelete_stmtContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IDelete_stmtContext); ok { - len++ - } - } - - tst := make([]IDelete_stmtContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IDelete_stmtContext); ok { - tst[i] = t.(IDelete_stmtContext) - i++ - } - } - - return tst -} - -func (s *Create_trigger_stmtContext) Delete_stmt(i int) IDelete_stmtContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IDelete_stmtContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IDelete_stmtContext) -} - -func (s *Create_trigger_stmtContext) AllSelect_stmt() []ISelect_stmtContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ISelect_stmtContext); ok { - len++ - } - } - - tst := make([]ISelect_stmtContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ISelect_stmtContext); ok { - tst[i] = t.(ISelect_stmtContext) - i++ - } - } - - return tst -} - -func (s *Create_trigger_stmtContext) Select_stmt(i int) ISelect_stmtContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISelect_stmtContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ISelect_stmtContext) -} - -func (s *Create_trigger_stmtContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Create_trigger_stmtContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Create_trigger_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Create_trigger_stmtContext) 
ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Create_trigger_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCreate_trigger_stmt(s) - } -} - -func (s *Create_trigger_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitCreate_trigger_stmt(s) - } -} - -func (p *SQLiteParser) Create_trigger_stmt() (localctx ICreate_trigger_stmtContext) { - this := p - _ = this - - localctx = NewCreate_trigger_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 42, SQLiteParserRULE_create_trigger_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(635) - p.Match(SQLiteParserCREATE_) - } - p.SetState(637) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_ { - { - p.SetState(636) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - } - { - p.SetState(639) - p.Match(SQLiteParserTRIGGER_) - } - p.SetState(643) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 71, p.GetParserRuleContext()) == 1 { - { - p.SetState(640) - p.Match(SQLiteParserIF_) - } - { - p.SetState(641) - p.Match(SQLiteParserNOT_) - } - { - p.SetState(642) - p.Match(SQLiteParserEXISTS_) - } - - } - p.SetState(648) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 72, p.GetParserRuleContext()) == 1 { - { - p.SetState(645) - p.Schema_name() - } - { - p.SetState(646) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(650) - p.Trigger_name() - } - p.SetState(655) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserBEFORE_: - { - p.SetState(651) - p.Match(SQLiteParserBEFORE_) - } - - case SQLiteParserAFTER_: - { - p.SetState(652) - p.Match(SQLiteParserAFTER_) - } - - case SQLiteParserINSTEAD_: - { - p.SetState(653) - p.Match(SQLiteParserINSTEAD_) - } - { - p.SetState(654) - p.Match(SQLiteParserOF_) - } - - case SQLiteParserDELETE_, SQLiteParserINSERT_, SQLiteParserUPDATE_: - - default: - } - p.SetState(671) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserDELETE_: - { - p.SetState(657) - p.Match(SQLiteParserDELETE_) - } - - case SQLiteParserINSERT_: - { - p.SetState(658) - p.Match(SQLiteParserINSERT_) - } - - case SQLiteParserUPDATE_: - { - p.SetState(659) - p.Match(SQLiteParserUPDATE_) - } - p.SetState(669) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserOF_ { - { - p.SetState(660) - p.Match(SQLiteParserOF_) - } - { - p.SetState(661) - p.Column_name() - } - p.SetState(666) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(662) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(663) - p.Column_name() - } - - p.SetState(668) - p.GetErrorHandler().Sync(p) - _la = 
p.GetTokenStream().LA(1) - } - - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - { - p.SetState(673) - p.Match(SQLiteParserON_) - } - { - p.SetState(674) - p.Table_name() - } - p.SetState(678) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserFOR_ { - { - p.SetState(675) - p.Match(SQLiteParserFOR_) - } - { - p.SetState(676) - p.Match(SQLiteParserEACH_) - } - { - p.SetState(677) - p.Match(SQLiteParserROW_) - } - - } - p.SetState(682) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWHEN_ { - { - p.SetState(680) - p.Match(SQLiteParserWHEN_) - } - { - p.SetState(681) - p.expr(0) - } - - } - { - p.SetState(684) - p.Match(SQLiteParserBEGIN_) - } - p.SetState(693) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for ok := true; ok; ok = _la == SQLiteParserDELETE_ || ((int64((_la-88)) & ^0x3f) == 0 && ((int64(1)<<(_la-88))&2386912217732743169) != 0) { - p.SetState(689) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 79, p.GetParserRuleContext()) { - case 1: - { - p.SetState(685) - p.Update_stmt() - } - - case 2: - { - p.SetState(686) - p.Insert_stmt() - } - - case 3: - { - p.SetState(687) - p.Delete_stmt() - } - - case 4: - { - p.SetState(688) - p.Select_stmt() - } - - } - { - p.SetState(691) - p.Match(SQLiteParserSCOL) - } - - p.SetState(695) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(697) - p.Match(SQLiteParserEND_) - } - - return localctx -} - -// ICreate_view_stmtContext is an interface to support dynamic dispatch. -type ICreate_view_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - CREATE_() antlr.TerminalNode - VIEW_() antlr.TerminalNode - View_name() IView_nameContext - AS_() antlr.TerminalNode - Select_stmt() ISelect_stmtContext - IF_() antlr.TerminalNode - NOT_() antlr.TerminalNode - EXISTS_() antlr.TerminalNode - Schema_name() ISchema_nameContext - DOT() antlr.TerminalNode - OPEN_PAR() antlr.TerminalNode - AllColumn_name() []IColumn_nameContext - Column_name(i int) IColumn_nameContext - CLOSE_PAR() antlr.TerminalNode - TEMP_() antlr.TerminalNode - TEMPORARY_() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsCreate_view_stmtContext differentiates from other interfaces. 
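Every rule function in this file, Create_trigger_stmt included, wraps its body in the same pair of defers: one exits the rule, and one recovers a RecognitionException panic thrown from deep inside the rule, reports it to the error handler, and resumes, while re-raising any other panic. A minimal sketch of that panic-as-control-flow idiom; `recognitionError` is a hypothetical stand-in for antlr.RecognitionException:

```go
package main

import "fmt"

// recognitionError stands in for antlr.RecognitionException.
type recognitionError struct{ msg string }

func (e recognitionError) Error() string { return e.msg }

// parseRule mirrors the generated shape: deep match failures panic, and
// a deferred recover converts the expected panic kind into an error
// result, re-panicking on anything unexpected.
func parseRule(input string) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if re, ok := r.(recognitionError); ok {
				err = re // analogue of ReportError + Recover in the generated code
				return
			}
			panic(r) // not a recognition error: propagate, as the generated code does
		}
	}()
	if input == "" {
		// analogue of panic(antlr.NewNoViableAltException(...))
		panic(recognitionError{msg: "no viable alternative"})
	}
	return nil
}

func main() {
	fmt.Println(parseRule(""))    // no viable alternative
	fmt.Println(parseRule("sql")) // <nil>
}
```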
- IsCreate_view_stmtContext() -} - -type Create_view_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyCreate_view_stmtContext() *Create_view_stmtContext { - var p = new(Create_view_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_create_view_stmt - return p -} - -func (*Create_view_stmtContext) IsCreate_view_stmtContext() {} - -func NewCreate_view_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Create_view_stmtContext { - var p = new(Create_view_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_create_view_stmt - - return p -} - -func (s *Create_view_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Create_view_stmtContext) CREATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCREATE_, 0) -} - -func (s *Create_view_stmtContext) VIEW_() antlr.TerminalNode { - return s.GetToken(SQLiteParserVIEW_, 0) -} - -func (s *Create_view_stmtContext) View_name() IView_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IView_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IView_nameContext) -} - -func (s *Create_view_stmtContext) AS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, 0) -} - -func (s *Create_view_stmtContext) Select_stmt() ISelect_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISelect_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISelect_stmtContext) -} - -func (s *Create_view_stmtContext) IF_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIF_, 0) -} - -func (s *Create_view_stmtContext) NOT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNOT_, 0) -} - -func (s *Create_view_stmtContext) EXISTS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXISTS_, 0) -} - -func (s *Create_view_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Create_view_stmtContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Create_view_stmtContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Create_view_stmtContext) AllColumn_name() []IColumn_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_nameContext); ok { - len++ - } - } - - tst := make([]IColumn_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_nameContext); ok { - tst[i] = t.(IColumn_nameContext) - i++ - } - } - - return tst -} - -func (s *Create_view_stmtContext) Column_name(i int) IColumn_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Create_view_stmtContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s 
*Create_view_stmtContext) TEMP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTEMP_, 0) -} - -func (s *Create_view_stmtContext) TEMPORARY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTEMPORARY_, 0) -} - -func (s *Create_view_stmtContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Create_view_stmtContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Create_view_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Create_view_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Create_view_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCreate_view_stmt(s) - } -} - -func (s *Create_view_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitCreate_view_stmt(s) - } -} - -func (p *SQLiteParser) Create_view_stmt() (localctx ICreate_view_stmtContext) { - this := p - _ = this - - localctx = NewCreate_view_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 44, SQLiteParserRULE_create_view_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(699) - p.Match(SQLiteParserCREATE_) - } - p.SetState(701) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_ { - { - p.SetState(700) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - } - { - p.SetState(703) - p.Match(SQLiteParserVIEW_) - } - p.SetState(707) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 82, p.GetParserRuleContext()) == 1 { - { - p.SetState(704) - p.Match(SQLiteParserIF_) - } - { - p.SetState(705) - p.Match(SQLiteParserNOT_) - } - { - p.SetState(706) - p.Match(SQLiteParserEXISTS_) - } - - } - p.SetState(712) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 83, p.GetParserRuleContext()) == 1 { - { - p.SetState(709) - p.Schema_name() - } - { - p.SetState(710) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(714) - p.View_name() - } - p.SetState(726) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserOPEN_PAR { - { - p.SetState(715) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(716) - p.Column_name() - } - p.SetState(721) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(717) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(718) - p.Column_name() - } - - p.SetState(723) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(724) - p.Match(SQLiteParserCLOSE_PAR) - } - - } - { - p.SetState(728) - p.Match(SQLiteParserAS_) - } - { - p.SetState(729) - p.Select_stmt() - } - - return localctx -} - -// ICreate_virtual_table_stmtContext is an interface to support dynamic 
dispatch. -type ICreate_virtual_table_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - CREATE_() antlr.TerminalNode - VIRTUAL_() antlr.TerminalNode - TABLE_() antlr.TerminalNode - Table_name() ITable_nameContext - USING_() antlr.TerminalNode - Module_name() IModule_nameContext - IF_() antlr.TerminalNode - NOT_() antlr.TerminalNode - EXISTS_() antlr.TerminalNode - Schema_name() ISchema_nameContext - DOT() antlr.TerminalNode - OPEN_PAR() antlr.TerminalNode - AllModule_argument() []IModule_argumentContext - Module_argument(i int) IModule_argumentContext - CLOSE_PAR() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsCreate_virtual_table_stmtContext differentiates from other interfaces. - IsCreate_virtual_table_stmtContext() -} - -type Create_virtual_table_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyCreate_virtual_table_stmtContext() *Create_virtual_table_stmtContext { - var p = new(Create_virtual_table_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_create_virtual_table_stmt - return p -} - -func (*Create_virtual_table_stmtContext) IsCreate_virtual_table_stmtContext() {} - -func NewCreate_virtual_table_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Create_virtual_table_stmtContext { - var p = new(Create_virtual_table_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_create_virtual_table_stmt - - return p -} - -func (s *Create_virtual_table_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Create_virtual_table_stmtContext) CREATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCREATE_, 0) -} - -func (s *Create_virtual_table_stmtContext) VIRTUAL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserVIRTUAL_, 0) -} - -func (s *Create_virtual_table_stmtContext) TABLE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTABLE_, 0) -} - -func (s *Create_virtual_table_stmtContext) Table_name() ITable_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *Create_virtual_table_stmtContext) USING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUSING_, 0) -} - -func (s *Create_virtual_table_stmtContext) Module_name() IModule_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IModule_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IModule_nameContext) -} - -func (s *Create_virtual_table_stmtContext) IF_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIF_, 0) -} - -func (s *Create_virtual_table_stmtContext) NOT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNOT_, 0) -} - -func (s *Create_virtual_table_stmtContext) EXISTS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXISTS_, 0) -} - -func (s *Create_virtual_table_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil 
- } - - return t.(ISchema_nameContext) -} - -func (s *Create_virtual_table_stmtContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Create_virtual_table_stmtContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Create_virtual_table_stmtContext) AllModule_argument() []IModule_argumentContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IModule_argumentContext); ok { - len++ - } - } - - tst := make([]IModule_argumentContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IModule_argumentContext); ok { - tst[i] = t.(IModule_argumentContext) - i++ - } - } - - return tst -} - -func (s *Create_virtual_table_stmtContext) Module_argument(i int) IModule_argumentContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IModule_argumentContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IModule_argumentContext) -} - -func (s *Create_virtual_table_stmtContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Create_virtual_table_stmtContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Create_virtual_table_stmtContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Create_virtual_table_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Create_virtual_table_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Create_virtual_table_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCreate_virtual_table_stmt(s) - } -} - -func (s *Create_virtual_table_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitCreate_virtual_table_stmt(s) - } -} - -func (p *SQLiteParser) Create_virtual_table_stmt() (localctx ICreate_virtual_table_stmtContext) { - this := p - _ = this - - localctx = NewCreate_virtual_table_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 46, SQLiteParserRULE_create_virtual_table_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(731) - p.Match(SQLiteParserCREATE_) - } - { - p.SetState(732) - p.Match(SQLiteParserVIRTUAL_) - } - { - p.SetState(733) - p.Match(SQLiteParserTABLE_) - } - p.SetState(737) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 86, p.GetParserRuleContext()) == 1 { - { - p.SetState(734) - p.Match(SQLiteParserIF_) - } - { - p.SetState(735) - p.Match(SQLiteParserNOT_) - } - { - p.SetState(736) - p.Match(SQLiteParserEXISTS_) - } - - } - p.SetState(742) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 87, p.GetParserRuleContext()) == 1 { - { - p.SetState(739) - p.Schema_name() - } - { - p.SetState(740) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(744) - p.Table_name() - } - 
{ - p.SetState(745) - p.Match(SQLiteParserUSING_) - } - { - p.SetState(746) - p.Module_name() - } - p.SetState(758) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserOPEN_PAR { - { - p.SetState(747) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(748) - p.Module_argument() - } - p.SetState(753) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(749) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(750) - p.Module_argument() - } - - p.SetState(755) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(756) - p.Match(SQLiteParserCLOSE_PAR) - } - - } - - return localctx -} - -// IWith_clauseContext is an interface to support dynamic dispatch. -type IWith_clauseContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - WITH_() antlr.TerminalNode - AllCte_table_name() []ICte_table_nameContext - Cte_table_name(i int) ICte_table_nameContext - AllAS_() []antlr.TerminalNode - AS_(i int) antlr.TerminalNode - AllOPEN_PAR() []antlr.TerminalNode - OPEN_PAR(i int) antlr.TerminalNode - AllSelect_stmt() []ISelect_stmtContext - Select_stmt(i int) ISelect_stmtContext - AllCLOSE_PAR() []antlr.TerminalNode - CLOSE_PAR(i int) antlr.TerminalNode - RECURSIVE_() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsWith_clauseContext differentiates from other interfaces. - IsWith_clauseContext() -} - -type With_clauseContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyWith_clauseContext() *With_clauseContext { - var p = new(With_clauseContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_with_clause - return p -} - -func (*With_clauseContext) IsWith_clauseContext() {} - -func NewWith_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *With_clauseContext { - var p = new(With_clauseContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_with_clause - - return p -} - -func (s *With_clauseContext) GetParser() antlr.Parser { return s.parser } - -func (s *With_clauseContext) WITH_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWITH_, 0) -} - -func (s *With_clauseContext) AllCte_table_name() []ICte_table_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ICte_table_nameContext); ok { - len++ - } - } - - tst := make([]ICte_table_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ICte_table_nameContext); ok { - tst[i] = t.(ICte_table_nameContext) - i++ - } - } - - return tst -} - -func (s *With_clauseContext) Cte_table_name(i int) ICte_table_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICte_table_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ICte_table_nameContext) -} - -func (s *With_clauseContext) AllAS_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserAS_) -} - -func (s *With_clauseContext) AS_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, i) -} - -func (s *With_clauseContext) AllOPEN_PAR() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserOPEN_PAR) -} - -func (s 
*With_clauseContext) OPEN_PAR(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, i) -} - -func (s *With_clauseContext) AllSelect_stmt() []ISelect_stmtContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ISelect_stmtContext); ok { - len++ - } - } - - tst := make([]ISelect_stmtContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ISelect_stmtContext); ok { - tst[i] = t.(ISelect_stmtContext) - i++ - } - } - - return tst -} - -func (s *With_clauseContext) Select_stmt(i int) ISelect_stmtContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISelect_stmtContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ISelect_stmtContext) -} - -func (s *With_clauseContext) AllCLOSE_PAR() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCLOSE_PAR) -} - -func (s *With_clauseContext) CLOSE_PAR(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, i) -} - -func (s *With_clauseContext) RECURSIVE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRECURSIVE_, 0) -} - -func (s *With_clauseContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *With_clauseContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *With_clauseContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *With_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *With_clauseContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterWith_clause(s) - } -} - -func (s *With_clauseContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitWith_clause(s) - } -} - -func (p *SQLiteParser) With_clause() (localctx IWith_clauseContext) { - this := p - _ = this - - localctx = NewWith_clauseContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 48, SQLiteParserRULE_with_clause) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(760) - p.Match(SQLiteParserWITH_) - } - p.SetState(762) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 90, p.GetParserRuleContext()) == 1 { - { - p.SetState(761) - p.Match(SQLiteParserRECURSIVE_) - } - - } - { - p.SetState(764) - p.Cte_table_name() - } - { - p.SetState(765) - p.Match(SQLiteParserAS_) - } - { - p.SetState(766) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(767) - p.Select_stmt() - } - { - p.SetState(768) - p.Match(SQLiteParserCLOSE_PAR) - } - p.SetState(778) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(769) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(770) - p.Cte_table_name() - } - { - p.SetState(771) - p.Match(SQLiteParserAS_) - } - { - p.SetState(772) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(773) - p.Select_stmt() - } - { - p.SetState(774) - p.Match(SQLiteParserCLOSE_PAR) - } - - 
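// Each additional common table expression repeats the same
// `COMMA cte_table_name AS_ OPEN_PAR select_stmt CLOSE_PAR` group just
// matched above; the parser then re-syncs and keeps looping while the
// next lookahead token is another COMMA, as in
// `WITH a AS (SELECT 1), b AS (SELECT 2) ...`.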
p.SetState(780) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - return localctx -} - -// ICte_table_nameContext is an interface to support dynamic dispatch. -type ICte_table_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Table_name() ITable_nameContext - OPEN_PAR() antlr.TerminalNode - AllColumn_name() []IColumn_nameContext - Column_name(i int) IColumn_nameContext - CLOSE_PAR() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsCte_table_nameContext differentiates from other interfaces. - IsCte_table_nameContext() -} - -type Cte_table_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyCte_table_nameContext() *Cte_table_nameContext { - var p = new(Cte_table_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_cte_table_name - return p -} - -func (*Cte_table_nameContext) IsCte_table_nameContext() {} - -func NewCte_table_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Cte_table_nameContext { - var p = new(Cte_table_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_cte_table_name - - return p -} - -func (s *Cte_table_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Cte_table_nameContext) Table_name() ITable_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *Cte_table_nameContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Cte_table_nameContext) AllColumn_name() []IColumn_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_nameContext); ok { - len++ - } - } - - tst := make([]IColumn_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_nameContext); ok { - tst[i] = t.(IColumn_nameContext) - i++ - } - } - - return tst -} - -func (s *Cte_table_nameContext) Column_name(i int) IColumn_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Cte_table_nameContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Cte_table_nameContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Cte_table_nameContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Cte_table_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Cte_table_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Cte_table_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCte_table_name(s) - } -} - -func (s *Cte_table_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := 
listener.(SQLiteParserListener); ok { - listenerT.ExitCte_table_name(s) - } -} - -func (p *SQLiteParser) Cte_table_name() (localctx ICte_table_nameContext) { - this := p - _ = this - - localctx = NewCte_table_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 50, SQLiteParserRULE_cte_table_name) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(781) - p.Table_name() - } - p.SetState(793) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserOPEN_PAR { - { - p.SetState(782) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(783) - p.Column_name() - } - p.SetState(788) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(784) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(785) - p.Column_name() - } - - p.SetState(790) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(791) - p.Match(SQLiteParserCLOSE_PAR) - } - - } - - return localctx -} - -// IRecursive_cteContext is an interface to support dynamic dispatch. -type IRecursive_cteContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Cte_table_name() ICte_table_nameContext - AS_() antlr.TerminalNode - OPEN_PAR() antlr.TerminalNode - Initial_select() IInitial_selectContext - UNION_() antlr.TerminalNode - Recursive_select() IRecursive_selectContext - CLOSE_PAR() antlr.TerminalNode - ALL_() antlr.TerminalNode - - // IsRecursive_cteContext differentiates from other interfaces. 
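// Taken together, the getters above mirror the fixed shape of a
// recursive CTE, `cte_table_name AS ( initial_select UNION [ALL]
// recursive_select )`, with ALL_ as the only optional terminal.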
- IsRecursive_cteContext() -} - -type Recursive_cteContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyRecursive_cteContext() *Recursive_cteContext { - var p = new(Recursive_cteContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_recursive_cte - return p -} - -func (*Recursive_cteContext) IsRecursive_cteContext() {} - -func NewRecursive_cteContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Recursive_cteContext { - var p = new(Recursive_cteContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_recursive_cte - - return p -} - -func (s *Recursive_cteContext) GetParser() antlr.Parser { return s.parser } - -func (s *Recursive_cteContext) Cte_table_name() ICte_table_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICte_table_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICte_table_nameContext) -} - -func (s *Recursive_cteContext) AS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, 0) -} - -func (s *Recursive_cteContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Recursive_cteContext) Initial_select() IInitial_selectContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IInitial_selectContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IInitial_selectContext) -} - -func (s *Recursive_cteContext) UNION_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUNION_, 0) -} - -func (s *Recursive_cteContext) Recursive_select() IRecursive_selectContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IRecursive_selectContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IRecursive_selectContext) -} - -func (s *Recursive_cteContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Recursive_cteContext) ALL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserALL_, 0) -} - -func (s *Recursive_cteContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Recursive_cteContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Recursive_cteContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterRecursive_cte(s) - } -} - -func (s *Recursive_cteContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitRecursive_cte(s) - } -} - -func (p *SQLiteParser) Recursive_cte() (localctx IRecursive_cteContext) { - this := p - _ = this - - localctx = NewRecursive_cteContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 52, SQLiteParserRULE_recursive_cte) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(795) - p.Cte_table_name() - } - { - 
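// The leading cte_table_name has been consumed; what remains is the
// mandatory tail `AS ( initial_select UNION [ALL] recursive_select )`,
// matched token by token below with ALL_ as the only optional piece.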
p.SetState(796) - p.Match(SQLiteParserAS_) - } - { - p.SetState(797) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(798) - p.Initial_select() - } - { - p.SetState(799) - p.Match(SQLiteParserUNION_) - } - p.SetState(801) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserALL_ { - { - p.SetState(800) - p.Match(SQLiteParserALL_) - } - - } - { - p.SetState(803) - p.Recursive_select() - } - { - p.SetState(804) - p.Match(SQLiteParserCLOSE_PAR) - } - - return localctx -} - -// ICommon_table_expressionContext is an interface to support dynamic dispatch. -type ICommon_table_expressionContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Table_name() ITable_nameContext - AS_() antlr.TerminalNode - AllOPEN_PAR() []antlr.TerminalNode - OPEN_PAR(i int) antlr.TerminalNode - Select_stmt() ISelect_stmtContext - AllCLOSE_PAR() []antlr.TerminalNode - CLOSE_PAR(i int) antlr.TerminalNode - AllColumn_name() []IColumn_nameContext - Column_name(i int) IColumn_nameContext - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsCommon_table_expressionContext differentiates from other interfaces. - IsCommon_table_expressionContext() -} - -type Common_table_expressionContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyCommon_table_expressionContext() *Common_table_expressionContext { - var p = new(Common_table_expressionContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_common_table_expression - return p -} - -func (*Common_table_expressionContext) IsCommon_table_expressionContext() {} - -func NewCommon_table_expressionContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Common_table_expressionContext { - var p = new(Common_table_expressionContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_common_table_expression - - return p -} - -func (s *Common_table_expressionContext) GetParser() antlr.Parser { return s.parser } - -func (s *Common_table_expressionContext) Table_name() ITable_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *Common_table_expressionContext) AS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, 0) -} - -func (s *Common_table_expressionContext) AllOPEN_PAR() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserOPEN_PAR) -} - -func (s *Common_table_expressionContext) OPEN_PAR(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, i) -} - -func (s *Common_table_expressionContext) Select_stmt() ISelect_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISelect_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISelect_stmtContext) -} - -func (s *Common_table_expressionContext) AllCLOSE_PAR() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCLOSE_PAR) -} - -func (s *Common_table_expressionContext) CLOSE_PAR(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, i) -} - -func (s *Common_table_expressionContext) AllColumn_name() []IColumn_nameContext { - children := s.GetChildren() - 
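// ANTLR's Go target keeps children in one untyped slice, so typed
// "All*" getters like this one scan it twice: a first pass counts the
// matching contexts, a second copies them into a slice of exactly that size.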
len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_nameContext); ok { - len++ - } - } - - tst := make([]IColumn_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_nameContext); ok { - tst[i] = t.(IColumn_nameContext) - i++ - } - } - - return tst -} - -func (s *Common_table_expressionContext) Column_name(i int) IColumn_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Common_table_expressionContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Common_table_expressionContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Common_table_expressionContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Common_table_expressionContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Common_table_expressionContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCommon_table_expression(s) - } -} - -func (s *Common_table_expressionContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitCommon_table_expression(s) - } -} - -func (p *SQLiteParser) Common_table_expression() (localctx ICommon_table_expressionContext) { - this := p - _ = this - - localctx = NewCommon_table_expressionContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 54, SQLiteParserRULE_common_table_expression) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(806) - p.Table_name() - } - p.SetState(818) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserOPEN_PAR { - { - p.SetState(807) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(808) - p.Column_name() - } - p.SetState(813) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(809) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(810) - p.Column_name() - } - - p.SetState(815) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(816) - p.Match(SQLiteParserCLOSE_PAR) - } - - } - { - p.SetState(820) - p.Match(SQLiteParserAS_) - } - { - p.SetState(821) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(822) - p.Select_stmt() - } - { - p.SetState(823) - p.Match(SQLiteParserCLOSE_PAR) - } - - return localctx -} - -// IDelete_stmtContext is an interface to support dynamic dispatch. -type IDelete_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
- GetParser() antlr.Parser - - // Getter signatures - DELETE_() antlr.TerminalNode - FROM_() antlr.TerminalNode - Qualified_table_name() IQualified_table_nameContext - With_clause() IWith_clauseContext - WHERE_() antlr.TerminalNode - Expr() IExprContext - Returning_clause() IReturning_clauseContext - - // IsDelete_stmtContext differentiates from other interfaces. - IsDelete_stmtContext() -} - -type Delete_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyDelete_stmtContext() *Delete_stmtContext { - var p = new(Delete_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_delete_stmt - return p -} - -func (*Delete_stmtContext) IsDelete_stmtContext() {} - -func NewDelete_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Delete_stmtContext { - var p = new(Delete_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_delete_stmt - - return p -} - -func (s *Delete_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Delete_stmtContext) DELETE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDELETE_, 0) -} - -func (s *Delete_stmtContext) FROM_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFROM_, 0) -} - -func (s *Delete_stmtContext) Qualified_table_name() IQualified_table_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IQualified_table_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IQualified_table_nameContext) -} - -func (s *Delete_stmtContext) With_clause() IWith_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IWith_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IWith_clauseContext) -} - -func (s *Delete_stmtContext) WHERE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWHERE_, 0) -} - -func (s *Delete_stmtContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Delete_stmtContext) Returning_clause() IReturning_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IReturning_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IReturning_clauseContext) -} - -func (s *Delete_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Delete_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Delete_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterDelete_stmt(s) - } -} - -func (s *Delete_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitDelete_stmt(s) - } -} - -func (p *SQLiteParser) Delete_stmt() (localctx IDelete_stmtContext) { - this := p - _ = this - - localctx = NewDelete_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 56, SQLiteParserRULE_delete_stmt) - var _la int - - 
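// Shape of the rule as generated here:
//   [with_clause] DELETE_ FROM_ qualified_table_name [WHERE_ expr] [returning_clause]
// Each optional clause below is gated on a single-token lookahead
// (WITH_, WHERE_, RETURNING_ respectively).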
defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(826) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWITH_ { - { - p.SetState(825) - p.With_clause() - } - - } - { - p.SetState(828) - p.Match(SQLiteParserDELETE_) - } - { - p.SetState(829) - p.Match(SQLiteParserFROM_) - } - { - p.SetState(830) - p.Qualified_table_name() - } - p.SetState(833) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWHERE_ { - { - p.SetState(831) - p.Match(SQLiteParserWHERE_) - } - { - p.SetState(832) - p.expr(0) - } - - } - p.SetState(836) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserRETURNING_ { - { - p.SetState(835) - p.Returning_clause() - } - - } - - return localctx -} - -// IDelete_stmt_limitedContext is an interface to support dynamic dispatch. -type IDelete_stmt_limitedContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - DELETE_() antlr.TerminalNode - FROM_() antlr.TerminalNode - Qualified_table_name() IQualified_table_nameContext - With_clause() IWith_clauseContext - WHERE_() antlr.TerminalNode - Expr() IExprContext - Returning_clause() IReturning_clauseContext - Limit_stmt() ILimit_stmtContext - Order_by_stmt() IOrder_by_stmtContext - - // IsDelete_stmt_limitedContext differentiates from other interfaces. - IsDelete_stmt_limitedContext() -} - -type Delete_stmt_limitedContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyDelete_stmt_limitedContext() *Delete_stmt_limitedContext { - var p = new(Delete_stmt_limitedContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_delete_stmt_limited - return p -} - -func (*Delete_stmt_limitedContext) IsDelete_stmt_limitedContext() {} - -func NewDelete_stmt_limitedContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Delete_stmt_limitedContext { - var p = new(Delete_stmt_limitedContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_delete_stmt_limited - - return p -} - -func (s *Delete_stmt_limitedContext) GetParser() antlr.Parser { return s.parser } - -func (s *Delete_stmt_limitedContext) DELETE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDELETE_, 0) -} - -func (s *Delete_stmt_limitedContext) FROM_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFROM_, 0) -} - -func (s *Delete_stmt_limitedContext) Qualified_table_name() IQualified_table_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IQualified_table_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IQualified_table_nameContext) -} - -func (s *Delete_stmt_limitedContext) With_clause() IWith_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IWith_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IWith_clauseContext) -} - -func (s *Delete_stmt_limitedContext) WHERE_() antlr.TerminalNode { - 
return s.GetToken(SQLiteParserWHERE_, 0) -} - -func (s *Delete_stmt_limitedContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Delete_stmt_limitedContext) Returning_clause() IReturning_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IReturning_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IReturning_clauseContext) -} - -func (s *Delete_stmt_limitedContext) Limit_stmt() ILimit_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ILimit_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ILimit_stmtContext) -} - -func (s *Delete_stmt_limitedContext) Order_by_stmt() IOrder_by_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IOrder_by_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IOrder_by_stmtContext) -} - -func (s *Delete_stmt_limitedContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Delete_stmt_limitedContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Delete_stmt_limitedContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterDelete_stmt_limited(s) - } -} - -func (s *Delete_stmt_limitedContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitDelete_stmt_limited(s) - } -} - -func (p *SQLiteParser) Delete_stmt_limited() (localctx IDelete_stmt_limitedContext) { - this := p - _ = this - - localctx = NewDelete_stmt_limitedContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 58, SQLiteParserRULE_delete_stmt_limited) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(839) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWITH_ { - { - p.SetState(838) - p.With_clause() - } - - } - { - p.SetState(841) - p.Match(SQLiteParserDELETE_) - } - { - p.SetState(842) - p.Match(SQLiteParserFROM_) - } - { - p.SetState(843) - p.Qualified_table_name() - } - p.SetState(846) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWHERE_ { - { - p.SetState(844) - p.Match(SQLiteParserWHERE_) - } - { - p.SetState(845) - p.expr(0) - } - - } - p.SetState(849) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserRETURNING_ { - { - p.SetState(848) - p.Returning_clause() - } - - } - p.SetState(855) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserLIMIT_ || _la == SQLiteParserORDER_ { - p.SetState(852) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserORDER_ { - { - p.SetState(851) - p.Order_by_stmt() - } - - } - { - 
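// Once this branch was entered on a LIMIT_ or ORDER_ lookahead, the
// limit_stmt call below is unconditional, so an ORDER BY tail without a
// following LIMIT cannot parse here; this mirrors SQLite's optional
// DELETE ... LIMIT syntax, where ORDER BY is only legal together with LIMIT.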
p.SetState(854) - p.Limit_stmt() - } - - } - - return localctx -} - -// IDetach_stmtContext is an interface to support dynamic dispatch. -type IDetach_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - DETACH_() antlr.TerminalNode - Schema_name() ISchema_nameContext - DATABASE_() antlr.TerminalNode - - // IsDetach_stmtContext differentiates from other interfaces. - IsDetach_stmtContext() -} - -type Detach_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyDetach_stmtContext() *Detach_stmtContext { - var p = new(Detach_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_detach_stmt - return p -} - -func (*Detach_stmtContext) IsDetach_stmtContext() {} - -func NewDetach_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Detach_stmtContext { - var p = new(Detach_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_detach_stmt - - return p -} - -func (s *Detach_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Detach_stmtContext) DETACH_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDETACH_, 0) -} - -func (s *Detach_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Detach_stmtContext) DATABASE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDATABASE_, 0) -} - -func (s *Detach_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Detach_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Detach_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterDetach_stmt(s) - } -} - -func (s *Detach_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitDetach_stmt(s) - } -} - -func (p *SQLiteParser) Detach_stmt() (localctx IDetach_stmtContext) { - this := p - _ = this - - localctx = NewDetach_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 60, SQLiteParserRULE_detach_stmt) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(857) - p.Match(SQLiteParserDETACH_) - } - p.SetState(859) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 105, p.GetParserRuleContext()) == 1 { - { - p.SetState(858) - p.Match(SQLiteParserDATABASE_) - } - - } - { - p.SetState(861) - p.Schema_name() - } - - return localctx -} - -// IDrop_stmtContext is an interface to support dynamic dispatch. -type IDrop_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // GetObject returns the object token. 
- GetObject() antlr.Token - - // SetObject sets the object token. - SetObject(antlr.Token) - - // Getter signatures - DROP_() antlr.TerminalNode - Any_name() IAny_nameContext - INDEX_() antlr.TerminalNode - TABLE_() antlr.TerminalNode - TRIGGER_() antlr.TerminalNode - VIEW_() antlr.TerminalNode - IF_() antlr.TerminalNode - EXISTS_() antlr.TerminalNode - Schema_name() ISchema_nameContext - DOT() antlr.TerminalNode - - // IsDrop_stmtContext differentiates from other interfaces. - IsDrop_stmtContext() -} - -type Drop_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - object antlr.Token -} - -func NewEmptyDrop_stmtContext() *Drop_stmtContext { - var p = new(Drop_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_drop_stmt - return p -} - -func (*Drop_stmtContext) IsDrop_stmtContext() {} - -func NewDrop_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Drop_stmtContext { - var p = new(Drop_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_drop_stmt - - return p -} - -func (s *Drop_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Drop_stmtContext) GetObject() antlr.Token { return s.object } - -func (s *Drop_stmtContext) SetObject(v antlr.Token) { s.object = v } - -func (s *Drop_stmtContext) DROP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDROP_, 0) -} - -func (s *Drop_stmtContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Drop_stmtContext) INDEX_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINDEX_, 0) -} - -func (s *Drop_stmtContext) TABLE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTABLE_, 0) -} - -func (s *Drop_stmtContext) TRIGGER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTRIGGER_, 0) -} - -func (s *Drop_stmtContext) VIEW_() antlr.TerminalNode { - return s.GetToken(SQLiteParserVIEW_, 0) -} - -func (s *Drop_stmtContext) IF_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIF_, 0) -} - -func (s *Drop_stmtContext) EXISTS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXISTS_, 0) -} - -func (s *Drop_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Drop_stmtContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Drop_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Drop_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Drop_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterDrop_stmt(s) - } -} - -func (s *Drop_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitDrop_stmt(s) - } -} - -func (p *SQLiteParser) Drop_stmt() (localctx IDrop_stmtContext) { - this := p - _ = this - - localctx = NewDrop_stmtContext(p, 
p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 62, SQLiteParserRULE_drop_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(863) - p.Match(SQLiteParserDROP_) - } - { - p.SetState(864) - - var _lt = p.GetTokenStream().LT(1) - - localctx.(*Drop_stmtContext).object = _lt - - _la = p.GetTokenStream().LA(1) - - if !((int64((_la-84)) & ^0x3f) == 0 && ((int64(1)<<(_la-84))&2324138882699886593) != 0) { - var _ri = p.GetErrorHandler().RecoverInline(p) - - localctx.(*Drop_stmtContext).object = _ri - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - p.SetState(867) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 106, p.GetParserRuleContext()) == 1 { - { - p.SetState(865) - p.Match(SQLiteParserIF_) - } - { - p.SetState(866) - p.Match(SQLiteParserEXISTS_) - } - - } - p.SetState(872) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 107, p.GetParserRuleContext()) == 1 { - { - p.SetState(869) - p.Schema_name() - } - { - p.SetState(870) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(874) - p.Any_name() - } - - return localctx -} - -// IExprContext is an interface to support dynamic dispatch. -type IExprContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Literal_value() ILiteral_valueContext - BIND_PARAMETER() antlr.TerminalNode - Column_name() IColumn_nameContext - Table_name() ITable_nameContext - AllDOT() []antlr.TerminalNode - DOT(i int) antlr.TerminalNode - Schema_name() ISchema_nameContext - Unary_operator() IUnary_operatorContext - AllExpr() []IExprContext - Expr(i int) IExprContext - Function_name() IFunction_nameContext - OPEN_PAR() antlr.TerminalNode - CLOSE_PAR() antlr.TerminalNode - STAR() antlr.TerminalNode - Filter_clause() IFilter_clauseContext - Over_clause() IOver_clauseContext - DISTINCT_() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - CAST_() antlr.TerminalNode - AS_() antlr.TerminalNode - Type_name() IType_nameContext - Select_stmt() ISelect_stmtContext - EXISTS_() antlr.TerminalNode - NOT_() antlr.TerminalNode - CASE_() antlr.TerminalNode - END_() antlr.TerminalNode - AllWHEN_() []antlr.TerminalNode - WHEN_(i int) antlr.TerminalNode - AllTHEN_() []antlr.TerminalNode - THEN_(i int) antlr.TerminalNode - ELSE_() antlr.TerminalNode - Raise_function() IRaise_functionContext - PIPE2() antlr.TerminalNode - DIV() antlr.TerminalNode - MOD() antlr.TerminalNode - PLUS() antlr.TerminalNode - MINUS() antlr.TerminalNode - LT2() antlr.TerminalNode - GT2() antlr.TerminalNode - AMP() antlr.TerminalNode - PIPE() antlr.TerminalNode - LT() antlr.TerminalNode - LT_EQ() antlr.TerminalNode - GT() antlr.TerminalNode - GT_EQ() antlr.TerminalNode - ASSIGN() antlr.TerminalNode - EQ() antlr.TerminalNode - NOT_EQ1() antlr.TerminalNode - NOT_EQ2() antlr.TerminalNode - IS_() antlr.TerminalNode - IN_() antlr.TerminalNode - LIKE_() antlr.TerminalNode - GLOB_() antlr.TerminalNode - MATCH_() antlr.TerminalNode - REGEXP_() antlr.TerminalNode - AND_() antlr.TerminalNode - OR_() antlr.TerminalNode - BETWEEN_() antlr.TerminalNode - COLLATE_() antlr.TerminalNode - 
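// Note that the operator terminals above (PIPE2 through COLLATE_) all hang
// off this single expr context: operator precedence is resolved by the
// recursive expr(_p) method further down, not by separate per-precedence
// grammar rules.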
Collation_name() ICollation_nameContext - ESCAPE_() antlr.TerminalNode - ISNULL_() antlr.TerminalNode - NOTNULL_() antlr.TerminalNode - NULL_() antlr.TerminalNode - Table_function_name() ITable_function_nameContext - - // IsExprContext differentiates from other interfaces. - IsExprContext() -} - -type ExprContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyExprContext() *ExprContext { - var p = new(ExprContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_expr - return p -} - -func (*ExprContext) IsExprContext() {} - -func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext { - var p = new(ExprContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_expr - - return p -} - -func (s *ExprContext) GetParser() antlr.Parser { return s.parser } - -func (s *ExprContext) Literal_value() ILiteral_valueContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ILiteral_valueContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ILiteral_valueContext) -} - -func (s *ExprContext) BIND_PARAMETER() antlr.TerminalNode { - return s.GetToken(SQLiteParserBIND_PARAMETER, 0) -} - -func (s *ExprContext) Column_name() IColumn_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *ExprContext) Table_name() ITable_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *ExprContext) AllDOT() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserDOT) -} - -func (s *ExprContext) DOT(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, i) -} - -func (s *ExprContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *ExprContext) Unary_operator() IUnary_operatorContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IUnary_operatorContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IUnary_operatorContext) -} - -func (s *ExprContext) AllExpr() []IExprContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IExprContext); ok { - len++ - } - } - - tst := make([]IExprContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IExprContext); ok { - tst[i] = t.(IExprContext) - i++ - } - } - - return tst -} - -func (s *ExprContext) Expr(i int) IExprContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *ExprContext) Function_name() IFunction_nameContext { - var t antlr.RuleContext - for _, ctx := range 
s.GetChildren() {
-		if _, ok := ctx.(IFunction_nameContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IFunction_nameContext)
-}
-
-func (s *ExprContext) OPEN_PAR() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserOPEN_PAR, 0)
-}
-
-func (s *ExprContext) CLOSE_PAR() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCLOSE_PAR, 0)
-}
-
-func (s *ExprContext) STAR() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserSTAR, 0)
-}
-
-func (s *ExprContext) Filter_clause() IFilter_clauseContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IFilter_clauseContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IFilter_clauseContext)
-}
-
-func (s *ExprContext) Over_clause() IOver_clauseContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IOver_clauseContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IOver_clauseContext)
-}
-
-func (s *ExprContext) DISTINCT_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserDISTINCT_, 0)
-}
-
-func (s *ExprContext) AllCOMMA() []antlr.TerminalNode {
-	return s.GetTokens(SQLiteParserCOMMA)
-}
-
-func (s *ExprContext) COMMA(i int) antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCOMMA, i)
-}
-
-func (s *ExprContext) CAST_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCAST_, 0)
-}
-
-func (s *ExprContext) AS_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserAS_, 0)
-}
-
-func (s *ExprContext) Type_name() IType_nameContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IType_nameContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IType_nameContext)
-}
-
-func (s *ExprContext) Select_stmt() ISelect_stmtContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ISelect_stmtContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(ISelect_stmtContext)
-}
-
-func (s *ExprContext) EXISTS_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserEXISTS_, 0)
-}
-
-func (s *ExprContext) NOT_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserNOT_, 0)
-}
-
-func (s *ExprContext) CASE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCASE_, 0)
-}
-
-func (s *ExprContext) END_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserEND_, 0)
-}
-
-func (s *ExprContext) AllWHEN_() []antlr.TerminalNode {
-	return s.GetTokens(SQLiteParserWHEN_)
-}
-
-func (s *ExprContext) WHEN_(i int) antlr.TerminalNode {
-	return s.GetToken(SQLiteParserWHEN_, i)
-}
-
-func (s *ExprContext) AllTHEN_() []antlr.TerminalNode {
-	return s.GetTokens(SQLiteParserTHEN_)
-}
-
-func (s *ExprContext) THEN_(i int) antlr.TerminalNode {
-	return s.GetToken(SQLiteParserTHEN_, i)
-}
-
-func (s *ExprContext) ELSE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserELSE_, 0)
-}
-
-func (s *ExprContext) Raise_function() IRaise_functionContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IRaise_functionContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IRaise_functionContext)
-}
-
-func (s *ExprContext) PIPE2() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserPIPE2, 0)
-}
-
-func (s *ExprContext) DIV() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserDIV, 0)
-}
-
-func (s *ExprContext) MOD() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserMOD, 0)
-}
-
-func (s *ExprContext) PLUS() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserPLUS, 0)
-}
-
-func (s *ExprContext) MINUS() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserMINUS, 0)
-}
-
-func (s *ExprContext) LT2() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserLT2, 0)
-}
-
-func (s *ExprContext) GT2() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserGT2, 0)
-}
-
-func (s *ExprContext) AMP() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserAMP, 0)
-}
-
-func (s *ExprContext) PIPE() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserPIPE, 0)
-}
-
-func (s *ExprContext) LT() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserLT, 0)
-}
-
-func (s *ExprContext) LT_EQ() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserLT_EQ, 0)
-}
-
-func (s *ExprContext) GT() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserGT, 0)
-}
-
-func (s *ExprContext) GT_EQ() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserGT_EQ, 0)
-}
-
-func (s *ExprContext) ASSIGN() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserASSIGN, 0)
-}
-
-func (s *ExprContext) EQ() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserEQ, 0)
-}
-
-func (s *ExprContext) NOT_EQ1() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserNOT_EQ1, 0)
-}
-
-func (s *ExprContext) NOT_EQ2() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserNOT_EQ2, 0)
-}
-
-func (s *ExprContext) IS_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserIS_, 0)
-}
-
-func (s *ExprContext) IN_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserIN_, 0)
-}
-
-func (s *ExprContext) LIKE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserLIKE_, 0)
-}
-
-func (s *ExprContext) GLOB_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserGLOB_, 0)
-}
-
-func (s *ExprContext) MATCH_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserMATCH_, 0)
-}
-
-func (s *ExprContext) REGEXP_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserREGEXP_, 0)
-}
-
-func (s *ExprContext) AND_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserAND_, 0)
-}
-
-func (s *ExprContext) OR_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserOR_, 0)
-}
-
-func (s *ExprContext) BETWEEN_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserBETWEEN_, 0)
-}
-
-func (s *ExprContext) COLLATE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCOLLATE_, 0)
-}
-
-func (s *ExprContext) Collation_name() ICollation_nameContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ICollation_nameContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(ICollation_nameContext)
-}
-
-func (s *ExprContext) ESCAPE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserESCAPE_, 0)
-}
-
-func (s *ExprContext) ISNULL_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserISNULL_, 0)
-}
-
-func (s *ExprContext) NOTNULL_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserNOTNULL_, 0)
-}
-
-func (s *ExprContext) NULL_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserNULL_, 0)
-}
-
-func (s *ExprContext) Table_function_name() ITable_function_nameContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ITable_function_nameContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(ITable_function_nameContext)
-}
-
-func (s *ExprContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterExpr(s)
-	}
-}
-
-func (s *ExprContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitExpr(s)
-	}
-}
-
-func (p *SQLiteParser) Expr() (localctx IExprContext) {
-	return p.expr(0)
-}
-
-func (p *SQLiteParser) expr(_p int) (localctx IExprContext) {
-	this := p
-	_ = this
-
-	var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
-	_parentState := p.GetState()
-	localctx = NewExprContext(p, p.GetParserRuleContext(), _parentState)
-	var _prevctx IExprContext = localctx
-	var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning.
-	_startState := 64
-	p.EnterRecursionRule(localctx, 64, SQLiteParserRULE_expr, _p)
-	var _la int
-
-	defer func() {
-		p.UnrollRecursionContexts(_parentctx)
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	var _alt int
-
-	p.EnterOuterAlt(localctx, 1)
-	p.SetState(964)
-	p.GetErrorHandler().Sync(p)
-	switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 121, p.GetParserRuleContext()) {
-	case 1:
-		{
-			p.SetState(877)
-			p.Literal_value()
-		}
-
-	case 2:
-		{
-			p.SetState(878)
-			p.Match(SQLiteParserBIND_PARAMETER)
-		}
-
-	case 3:
-		p.SetState(887)
-		p.GetErrorHandler().Sync(p)
-
-		if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 109, p.GetParserRuleContext()) == 1 {
-			p.SetState(882)
-			p.GetErrorHandler().Sync(p)
-
-			if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 108, p.GetParserRuleContext()) == 1 {
-				{
-					p.SetState(879)
-					p.Schema_name()
-				}
-				{
-					p.SetState(880)
-					p.Match(SQLiteParserDOT)
-				}
-
-			}
-			{
-				p.SetState(884)
-				p.Table_name()
-			}
-			{
-				p.SetState(885)
-				p.Match(SQLiteParserDOT)
-			}
-
-		}
-		{
-			p.SetState(889)
-			p.Column_name()
-		}
-
-	case 4:
-		{
-			p.SetState(890)
-			p.Unary_operator()
-		}
-		{
-			p.SetState(891)
-			p.expr(21)
-		}
-
-	case 5:
-		{
-			p.SetState(893)
-			p.Function_name()
-		}
-		{
-			p.SetState(894)
-			p.Match(SQLiteParserOPEN_PAR)
-		}
-		p.SetState(907)
-		p.GetErrorHandler().Sync(p)
-
-		switch p.GetTokenStream().LA(1) {
-		case SQLiteParserOPEN_PAR, SQLiteParserPLUS, SQLiteParserMINUS, SQLiteParserTILDE, SQLiteParserABORT_, SQLiteParserACTION_, SQLiteParserADD_, SQLiteParserAFTER_, SQLiteParserALL_, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserAND_, SQLiteParserAS_, SQLiteParserASC_, SQLiteParserATTACH_, SQLiteParserAUTOINCREMENT_, SQLiteParserBEFORE_, SQLiteParserBEGIN_, SQLiteParserBETWEEN_, SQLiteParserBY_, SQLiteParserCASCADE_, SQLiteParserCASE_, SQLiteParserCAST_, SQLiteParserCHECK_, SQLiteParserCOLLATE_, SQLiteParserCOLUMN_, SQLiteParserCOMMIT_, SQLiteParserCONFLICT_, SQLiteParserCONSTRAINT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserCURRENT_DATE_, SQLiteParserCURRENT_TIME_, SQLiteParserCURRENT_TIMESTAMP_, SQLiteParserDATABASE_, SQLiteParserDEFAULT_, SQLiteParserDEFERRABLE_, SQLiteParserDEFERRED_, SQLiteParserDELETE_, SQLiteParserDESC_, SQLiteParserDETACH_, SQLiteParserDISTINCT_, SQLiteParserDROP_, SQLiteParserEACH_, SQLiteParserELSE_, SQLiteParserEND_, SQLiteParserESCAPE_, SQLiteParserEXCEPT_, SQLiteParserEXCLUSIVE_, SQLiteParserEXISTS_, SQLiteParserEXPLAIN_, SQLiteParserFAIL_, SQLiteParserFOR_, SQLiteParserFOREIGN_, SQLiteParserFROM_, SQLiteParserFULL_, SQLiteParserGLOB_, SQLiteParserGROUP_, SQLiteParserHAVING_, SQLiteParserIF_, SQLiteParserIGNORE_, SQLiteParserIMMEDIATE_, SQLiteParserIN_, SQLiteParserINDEX_, SQLiteParserINDEXED_, SQLiteParserINITIALLY_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINSTEAD_, SQLiteParserINTERSECT_, SQLiteParserINTO_, SQLiteParserIS_, SQLiteParserISNULL_, SQLiteParserJOIN_, SQLiteParserKEY_, SQLiteParserLEFT_, SQLiteParserLIKE_, SQLiteParserLIMIT_, SQLiteParserMATCH_, SQLiteParserNATURAL_, SQLiteParserNO_, SQLiteParserNOT_, SQLiteParserNOTNULL_, SQLiteParserNULL_, SQLiteParserOF_, SQLiteParserOFFSET_, SQLiteParserON_, SQLiteParserOR_, SQLiteParserORDER_, SQLiteParserOUTER_, SQLiteParserPLAN_, SQLiteParserPRAGMA_, SQLiteParserPRIMARY_, SQLiteParserQUERY_, SQLiteParserRAISE_, SQLiteParserRECURSIVE_, SQLiteParserREFERENCES_, SQLiteParserREGEXP_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserRENAME_, SQLiteParserREPLACE_, SQLiteParserRESTRICT_, SQLiteParserRIGHT_, SQLiteParserROLLBACK_, SQLiteParserROW_, SQLiteParserROWS_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserTABLE_, SQLiteParserTEMP_, SQLiteParserTEMPORARY_, SQLiteParserTHEN_, SQLiteParserTO_, SQLiteParserTRANSACTION_, SQLiteParserTRIGGER_, SQLiteParserUNION_, SQLiteParserUNIQUE_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserVIEW_, SQLiteParserVIRTUAL_, SQLiteParserWHEN_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWITHOUT_, SQLiteParserFIRST_VALUE_, SQLiteParserOVER_, SQLiteParserPARTITION_, SQLiteParserRANGE_, SQLiteParserPRECEDING_, SQLiteParserUNBOUNDED_, SQLiteParserCURRENT_, SQLiteParserFOLLOWING_, SQLiteParserCUME_DIST_, SQLiteParserDENSE_RANK_, SQLiteParserLAG_, SQLiteParserLAST_VALUE_, SQLiteParserLEAD_, SQLiteParserNTH_VALUE_, SQLiteParserNTILE_, SQLiteParserPERCENT_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_, SQLiteParserGENERATED_, SQLiteParserALWAYS_, SQLiteParserSTORED_, SQLiteParserTRUE_, SQLiteParserFALSE_, SQLiteParserWINDOW_, SQLiteParserNULLS_, SQLiteParserFIRST_, SQLiteParserLAST_, SQLiteParserFILTER_, SQLiteParserGROUPS_, SQLiteParserEXCLUDE_, SQLiteParserIDENTIFIER, SQLiteParserNUMERIC_LITERAL, SQLiteParserBIND_PARAMETER, SQLiteParserSTRING_LITERAL, SQLiteParserBLOB_LITERAL:
-			p.SetState(896)
-			p.GetErrorHandler().Sync(p)
-
-			if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 110, p.GetParserRuleContext()) == 1 {
-				{
-					p.SetState(895)
-					p.Match(SQLiteParserDISTINCT_)
-				}
-
-			}
-			{
-				p.SetState(898)
-				p.expr(0)
-			}
-			p.SetState(903)
-			p.GetErrorHandler().Sync(p)
-			_la = p.GetTokenStream().LA(1)
-
-			for _la == SQLiteParserCOMMA {
-				{
-					p.SetState(899)
-					p.Match(SQLiteParserCOMMA)
-				}
-				{
-					p.SetState(900)
-					p.expr(0)
-				}
-
-				p.SetState(905)
-				p.GetErrorHandler().Sync(p)
-				_la = p.GetTokenStream().LA(1)
-			}
-
-		case SQLiteParserSTAR:
-			{
-				p.SetState(906)
-				p.Match(SQLiteParserSTAR)
-			}
-
-		case SQLiteParserCLOSE_PAR:
-
-		default:
-		}
-		{
-			p.SetState(909)
-			p.Match(SQLiteParserCLOSE_PAR)
-		}
-		p.SetState(911)
-		p.GetErrorHandler().Sync(p)
-
-		if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 113, p.GetParserRuleContext()) == 1 {
-			{
-				p.SetState(910)
-				p.Filter_clause()
-			}
-
-		}
-		p.SetState(914)
-		p.GetErrorHandler().Sync(p)
-
-		if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 114, p.GetParserRuleContext()) == 1 {
-			{
-				p.SetState(913)
-				p.Over_clause()
-			}
-
-		}
-
-	case 6:
-		{
-			p.SetState(916)
-			p.Match(SQLiteParserOPEN_PAR)
-		}
-		{
-			p.SetState(917)
-			p.expr(0)
-		}
-		p.SetState(922)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-
-		for _la == SQLiteParserCOMMA {
-			{
-				p.SetState(918)
-				p.Match(SQLiteParserCOMMA)
-			}
-			{
-				p.SetState(919)
-				p.expr(0)
-			}
-
-			p.SetState(924)
-			p.GetErrorHandler().Sync(p)
-			_la = p.GetTokenStream().LA(1)
-		}
-		{
-			p.SetState(925)
-			p.Match(SQLiteParserCLOSE_PAR)
-		}
-
-	case 7:
-		{
-			p.SetState(927)
-			p.Match(SQLiteParserCAST_)
-		}
-		{
-			p.SetState(928)
-			p.Match(SQLiteParserOPEN_PAR)
-		}
-		{
-			p.SetState(929)
-			p.expr(0)
-		}
-		{
-			p.SetState(930)
-			p.Match(SQLiteParserAS_)
-		}
-		{
-			p.SetState(931)
-			p.Type_name()
-		}
-		{
-			p.SetState(932)
-			p.Match(SQLiteParserCLOSE_PAR)
-		}
-
-	case 8:
-		p.SetState(938)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-
-		if _la == SQLiteParserEXISTS_ || _la == SQLiteParserNOT_ {
-			p.SetState(935)
-			p.GetErrorHandler().Sync(p)
-			_la = p.GetTokenStream().LA(1)
-
-			if _la == SQLiteParserNOT_ {
-				{
-					p.SetState(934)
-					p.Match(SQLiteParserNOT_)
-				}
-
-			}
-			{
-				p.SetState(937)
-				p.Match(SQLiteParserEXISTS_)
-			}
-
-		}
-		{
-			p.SetState(940)
-			p.Match(SQLiteParserOPEN_PAR)
-		}
-		{
-			p.SetState(941)
-			p.Select_stmt()
-		}
-		{
-			p.SetState(942)
-			p.Match(SQLiteParserCLOSE_PAR)
-		}
-
-	case 9:
-		{
-			p.SetState(944)
-			p.Match(SQLiteParserCASE_)
-		}
-		p.SetState(946)
-		p.GetErrorHandler().Sync(p)
-
-		if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 118, p.GetParserRuleContext()) == 1 {
-			{
-				p.SetState(945)
-				p.expr(0)
-			}
-
-		}
-		p.SetState(953)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-
-		for ok := true; ok; ok = _la == SQLiteParserWHEN_ {
-			{
-				p.SetState(948)
-				p.Match(SQLiteParserWHEN_)
-			}
-			{
-				p.SetState(949)
-				p.expr(0)
-			}
-			{
-				p.SetState(950)
-				p.Match(SQLiteParserTHEN_)
-			}
-			{
-				p.SetState(951)
-				p.expr(0)
-			}
-
-			p.SetState(955)
-			p.GetErrorHandler().Sync(p)
-			_la = p.GetTokenStream().LA(1)
-		}
-		p.SetState(959)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-
-		if _la == SQLiteParserELSE_ {
-			{
-				p.SetState(957)
-				p.Match(SQLiteParserELSE_)
-			}
-			{
-				p.SetState(958)
-				p.expr(0)
-			}
-
-		}
-		{
-			p.SetState(961)
-			p.Match(SQLiteParserEND_)
-		}
-
-	case 10:
-		{
-			p.SetState(963)
-			p.Raise_function()
-		}
-
-	}
-	p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
-	p.SetState(1085)
-	p.GetErrorHandler().Sync(p)
-	_alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 137, p.GetParserRuleContext())
-
-	for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
-		if _alt == 1 {
-			if p.GetParseListeners() != nil {
-				p.TriggerExitRuleEvent()
-			}
-			_prevctx = localctx
-			p.SetState(1083)
-			p.GetErrorHandler().Sync(p)
-			switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 136, p.GetParserRuleContext()) {
-			case 1:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(966)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 20)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 20)", ""))
-				}
-				{
-					p.SetState(967)
-					p.Match(SQLiteParserPIPE2)
-				}
-				{
-					p.SetState(968)
-					p.expr(21)
-				}
-
-			case 2:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(969)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 19)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 19)", ""))
-				}
-				{
-					p.SetState(970)
-					_la = p.GetTokenStream().LA(1)
-
-					if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&12416) != 0) {
-						p.GetErrorHandler().RecoverInline(p)
-					} else {
-						p.GetErrorHandler().ReportMatch(p)
-						p.Consume()
-					}
-				}
-				{
-					p.SetState(971)
-					p.expr(20)
-				}
-
-			case 3:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(972)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 18)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 18)", ""))
-				}
-				{
-					p.SetState(973)
-					_la = p.GetTokenStream().LA(1)
-
-					if !(_la == SQLiteParserPLUS || _la == SQLiteParserMINUS) {
-						p.GetErrorHandler().RecoverInline(p)
-					} else {
-						p.GetErrorHandler().ReportMatch(p)
-						p.Consume()
-					}
-				}
-				{
-					p.SetState(974)
-					p.expr(19)
-				}
-
-			case 4:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(975)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 17)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 17)", ""))
-				}
-				{
-					p.SetState(976)
-					_la = p.GetTokenStream().LA(1)
-
-					if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&245760) != 0) {
-						p.GetErrorHandler().RecoverInline(p)
-					} else {
-						p.GetErrorHandler().ReportMatch(p)
-						p.Consume()
-					}
-				}
-				{
-					p.SetState(977)
-					p.expr(18)
-				}
-
-			case 5:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(978)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 16)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 16)", ""))
-				}
-				{
-					p.SetState(979)
-					_la = p.GetTokenStream().LA(1)
-
-					if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&3932160) != 0) {
-						p.GetErrorHandler().RecoverInline(p)
-					} else {
-						p.GetErrorHandler().ReportMatch(p)
-						p.Consume()
-					}
-				}
-				{
-					p.SetState(980)
-					p.expr(17)
-				}
-
-			case 6:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(981)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 15)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 15)", ""))
-				}
-				p.SetState(994)
-				p.GetErrorHandler().Sync(p)
-				switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 122, p.GetParserRuleContext()) {
-				case 1:
-					{
-						p.SetState(982)
-						p.Match(SQLiteParserASSIGN)
-					}
-
-				case 2:
-					{
-						p.SetState(983)
-						p.Match(SQLiteParserEQ)
-					}
-
-				case 3:
-					{
-						p.SetState(984)
-						p.Match(SQLiteParserNOT_EQ1)
-					}
-
-				case 4:
-					{
-						p.SetState(985)
-						p.Match(SQLiteParserNOT_EQ2)
-					}
-
-				case 5:
-					{
-						p.SetState(986)
-						p.Match(SQLiteParserIS_)
-					}
-
-				case 6:
-					{
-						p.SetState(987)
-						p.Match(SQLiteParserIS_)
-					}
-					{
-						p.SetState(988)
-						p.Match(SQLiteParserNOT_)
-					}
-
-				case 7:
-					{
-						p.SetState(989)
-						p.Match(SQLiteParserIN_)
-					}
-
-				case 8:
-					{
-						p.SetState(990)
-						p.Match(SQLiteParserLIKE_)
-					}
-
-				case 9:
-					{
-						p.SetState(991)
-						p.Match(SQLiteParserGLOB_)
-					}
-
-				case 10:
-					{
-						p.SetState(992)
-						p.Match(SQLiteParserMATCH_)
-					}
-
-				case 11:
-					{
-						p.SetState(993)
-						p.Match(SQLiteParserREGEXP_)
-					}
-
-				}
-				{
-					p.SetState(996)
-					p.expr(16)
-				}
-
-			case 7:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(997)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 14)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 14)", ""))
-				}
-				{
-					p.SetState(998)
-					p.Match(SQLiteParserAND_)
-				}
-				{
-					p.SetState(999)
-					p.expr(15)
-				}
-
-			case 8:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(1000)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 13)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 13)", ""))
-				}
-				{
-					p.SetState(1001)
-					p.Match(SQLiteParserOR_)
-				}
-				{
-					p.SetState(1002)
-					p.expr(14)
-				}
-
-			case 9:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(1003)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 6)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 6)", ""))
-				}
-				{
-					p.SetState(1004)
-					p.Match(SQLiteParserIS_)
-				}
-				p.SetState(1006)
-				p.GetErrorHandler().Sync(p)
-
-				if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 123, p.GetParserRuleContext()) == 1 {
-					{
-						p.SetState(1005)
-						p.Match(SQLiteParserNOT_)
-					}
-
-				}
-				{
-					p.SetState(1008)
-					p.expr(7)
-				}
-
-			case 10:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(1009)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 5)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 5)", ""))
-				}
-				p.SetState(1011)
-				p.GetErrorHandler().Sync(p)
-				_la = p.GetTokenStream().LA(1)
-
-				if _la == SQLiteParserNOT_ {
-					{
-						p.SetState(1010)
-						p.Match(SQLiteParserNOT_)
-					}
-
-				}
-				{
-					p.SetState(1013)
-					p.Match(SQLiteParserBETWEEN_)
-				}
-				{
-					p.SetState(1014)
-					p.expr(0)
-				}
-				{
-					p.SetState(1015)
-					p.Match(SQLiteParserAND_)
-				}
-				{
-					p.SetState(1016)
-					p.expr(6)
-				}
-
-			case 11:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(1018)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 9)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 9)", ""))
-				}
-				{
-					p.SetState(1019)
-					p.Match(SQLiteParserCOLLATE_)
-				}
-				{
-					p.SetState(1020)
-					p.Collation_name()
-				}
-
-			case 12:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(1021)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 8)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 8)", ""))
-				}
-				p.SetState(1023)
-				p.GetErrorHandler().Sync(p)
-				_la = p.GetTokenStream().LA(1)
-
-				if _la == SQLiteParserNOT_ {
-					{
-						p.SetState(1022)
-						p.Match(SQLiteParserNOT_)
-					}
-
-				}
-				{
-					p.SetState(1025)
-					_la = p.GetTokenStream().LA(1)
-
-					if !((int64((_la-77)) & ^0x3f) == 0 && ((int64(1)<<(_la-77))&2199028498433) != 0) {
-						p.GetErrorHandler().RecoverInline(p)
-					} else {
-						p.GetErrorHandler().ReportMatch(p)
-						p.Consume()
-					}
-				}
-				{
-					p.SetState(1026)
-					p.expr(0)
-				}
-				p.SetState(1029)
-				p.GetErrorHandler().Sync(p)
-
-				if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 126, p.GetParserRuleContext()) == 1 {
-					{
-						p.SetState(1027)
-						p.Match(SQLiteParserESCAPE_)
-					}
-					{
-						p.SetState(1028)
-						p.expr(0)
-					}
-
-				}
-
-			case 13:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(1031)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 7)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 7)", ""))
-				}
-				p.SetState(1036)
-				p.GetErrorHandler().Sync(p)
-
-				switch p.GetTokenStream().LA(1) {
-				case SQLiteParserISNULL_:
-					{
-						p.SetState(1032)
-						p.Match(SQLiteParserISNULL_)
-					}
-
-				case SQLiteParserNOTNULL_:
-					{
-						p.SetState(1033)
-						p.Match(SQLiteParserNOTNULL_)
-					}
-
-				case SQLiteParserNOT_:
-					{
-						p.SetState(1034)
-						p.Match(SQLiteParserNOT_)
-					}
-					{
-						p.SetState(1035)
-						p.Match(SQLiteParserNULL_)
-					}
-
-				default:
-					panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
-				}
-
-			case 14:
-				localctx = NewExprContext(p, _parentctx, _parentState)
-				p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr)
-				p.SetState(1038)
-
-				if !(p.Precpred(p.GetParserRuleContext(), 4)) {
-					panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 4)", ""))
-				}
-				p.SetState(1040)
-				p.GetErrorHandler().Sync(p)
-				_la = p.GetTokenStream().LA(1)
-
-				if _la == SQLiteParserNOT_ {
-					{
-						p.SetState(1039)
-						p.Match(SQLiteParserNOT_)
-					}
-
-				}
-				{
-					p.SetState(1042)
-					p.Match(SQLiteParserIN_)
-				}
-				p.SetState(1081)
-				p.GetErrorHandler().Sync(p)
-				switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 135, p.GetParserRuleContext()) {
-				case 1:
-					{
-						p.SetState(1043)
-						p.Match(SQLiteParserOPEN_PAR)
-					}
-					p.SetState(1053)
-					p.GetErrorHandler().Sync(p)
-
-					if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 130, p.GetParserRuleContext()) == 1 {
-						{
-							p.SetState(1044)
-							p.Select_stmt()
-						}
-
-					} else if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 130, p.GetParserRuleContext()) == 2 {
-						{
-							p.SetState(1045)
-							p.expr(0)
-						}
-						p.SetState(1050)
-						p.GetErrorHandler().Sync(p)
-						_la = p.GetTokenStream().LA(1)
-
-						for _la == SQLiteParserCOMMA {
-							{
-								p.SetState(1046)
-								p.Match(SQLiteParserCOMMA)
-							}
-							{
-								p.SetState(1047)
-								p.expr(0)
-							}
-
-							p.SetState(1052)
-							p.GetErrorHandler().Sync(p)
-							_la = p.GetTokenStream().LA(1)
-						}
-
-					}
-					{
-						p.SetState(1055)
-						p.Match(SQLiteParserCLOSE_PAR)
-					}
-
-				case 2:
-					p.SetState(1059)
-					p.GetErrorHandler().Sync(p)
-
-					if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 131, p.GetParserRuleContext()) == 1 {
-						{
-							p.SetState(1056)
-							p.Schema_name()
-						}
-						{
-							p.SetState(1057)
-							p.Match(SQLiteParserDOT)
-						}
-
-					}
-					{
-						p.SetState(1061)
-						p.Table_name()
-					}
-
-				case 3:
-					p.SetState(1065)
-					p.GetErrorHandler().Sync(p)
-
-					if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 132, p.GetParserRuleContext()) == 1 {
-						{
-							p.SetState(1062)
-							p.Schema_name()
-						}
-						{
-							p.SetState(1063)
-							p.Match(SQLiteParserDOT)
-						}
-
-					}
-					{
-						p.SetState(1067)
-						p.Table_function_name()
-					}
-					{
-						p.SetState(1068)
-						p.Match(SQLiteParserOPEN_PAR)
-					}
-					p.SetState(1077)
-					p.GetErrorHandler().Sync(p)
-					_la = p.GetTokenStream().LA(1)
-
-					if ((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&-33552632) != 0) || ((int64((_la-64)) & ^0x3f) == 0 && ((int64(1)<<(_la-64))&-1152921504606846977) != 0) || ((int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&4476578029606273023) != 0) {
-						{
-							p.SetState(1069)
-							p.expr(0)
-						}
-						p.SetState(1074)
-						p.GetErrorHandler().Sync(p)
-						_la = p.GetTokenStream().LA(1)
-
-						for _la == SQLiteParserCOMMA {
-							{
-								p.SetState(1070)
-								p.Match(SQLiteParserCOMMA)
-							}
-							{
-								p.SetState(1071)
-								p.expr(0)
-							}
-
-							p.SetState(1076)
-							p.GetErrorHandler().Sync(p)
-							_la = p.GetTokenStream().LA(1)
-						}
-
-					}
-					{
-						p.SetState(1079)
-						p.Match(SQLiteParserCLOSE_PAR)
-					}
-
-				}
-
-			}
-
-		}
-		p.SetState(1087)
-		p.GetErrorHandler().Sync(p)
-		_alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 137, p.GetParserRuleContext())
-	}
-
-	return localctx
-}
-
-// IRaise_functionContext is an interface to support dynamic dispatch.
-type IRaise_functionContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	RAISE_() antlr.TerminalNode
-	OPEN_PAR() antlr.TerminalNode
-	CLOSE_PAR() antlr.TerminalNode
-	IGNORE_() antlr.TerminalNode
-	COMMA() antlr.TerminalNode
-	Error_message() IError_messageContext
-	ROLLBACK_() antlr.TerminalNode
-	ABORT_() antlr.TerminalNode
-	FAIL_() antlr.TerminalNode
-
-	// IsRaise_functionContext differentiates from other interfaces.
-	IsRaise_functionContext()
-}
-
-type Raise_functionContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyRaise_functionContext() *Raise_functionContext {
-	var p = new(Raise_functionContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_raise_function
-	return p
-}
-
-func (*Raise_functionContext) IsRaise_functionContext() {}
-
-func NewRaise_functionContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Raise_functionContext {
-	var p = new(Raise_functionContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_raise_function
-
-	return p
-}
-
-func (s *Raise_functionContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Raise_functionContext) RAISE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserRAISE_, 0)
-}
-
-func (s *Raise_functionContext) OPEN_PAR() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserOPEN_PAR, 0)
-}
-
-func (s *Raise_functionContext) CLOSE_PAR() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCLOSE_PAR, 0)
-}
-
-func (s *Raise_functionContext) IGNORE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserIGNORE_, 0)
-}
-
-func (s *Raise_functionContext) COMMA() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCOMMA, 0)
-}
-
-func (s *Raise_functionContext) Error_message() IError_messageContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IError_messageContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IError_messageContext)
-}
-
-func (s *Raise_functionContext) ROLLBACK_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserROLLBACK_, 0)
-}
-
-func (s *Raise_functionContext) ABORT_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserABORT_, 0)
-}
-
-func (s *Raise_functionContext) FAIL_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserFAIL_, 0)
-}
-
-func (s *Raise_functionContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Raise_functionContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Raise_functionContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterRaise_function(s)
-	}
-}
-
-func (s *Raise_functionContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitRaise_function(s)
-	}
-}
-
-func (p *SQLiteParser) Raise_function() (localctx IRaise_functionContext) {
-	this := p
-	_ = this
-
-	localctx = NewRaise_functionContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 66, SQLiteParserRULE_raise_function)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1088)
-		p.Match(SQLiteParserRAISE_)
-	}
-	{
-		p.SetState(1089)
-		p.Match(SQLiteParserOPEN_PAR)
-	}
-	p.SetState(1094)
-	p.GetErrorHandler().Sync(p)
-
-	switch p.GetTokenStream().LA(1) {
-	case SQLiteParserIGNORE_:
-		{
-			p.SetState(1090)
-			p.Match(SQLiteParserIGNORE_)
-		}
-
-	case SQLiteParserABORT_, SQLiteParserFAIL_, SQLiteParserROLLBACK_:
-		{
-			p.SetState(1091)
-			_la = p.GetTokenStream().LA(1)
-
-			if !(_la == SQLiteParserABORT_ || _la == SQLiteParserFAIL_ || _la == SQLiteParserROLLBACK_) {
-				p.GetErrorHandler().RecoverInline(p)
-			} else {
-				p.GetErrorHandler().ReportMatch(p)
-				p.Consume()
-			}
-		}
-		{
-			p.SetState(1092)
-			p.Match(SQLiteParserCOMMA)
-		}
-		{
-			p.SetState(1093)
-			p.Error_message()
-		}
-
-	default:
-		panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
-	}
-	{
-		p.SetState(1096)
-		p.Match(SQLiteParserCLOSE_PAR)
-	}
-
-	return localctx
-}
-
-// ILiteral_valueContext is an interface to support dynamic dispatch.
-type ILiteral_valueContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	NUMERIC_LITERAL() antlr.TerminalNode
-	STRING_LITERAL() antlr.TerminalNode
-	BLOB_LITERAL() antlr.TerminalNode
-	NULL_() antlr.TerminalNode
-	TRUE_() antlr.TerminalNode
-	FALSE_() antlr.TerminalNode
-	CURRENT_TIME_() antlr.TerminalNode
-	CURRENT_DATE_() antlr.TerminalNode
-	CURRENT_TIMESTAMP_() antlr.TerminalNode
-
-	// IsLiteral_valueContext differentiates from other interfaces.
-	IsLiteral_valueContext()
-}
-
-type Literal_valueContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyLiteral_valueContext() *Literal_valueContext {
-	var p = new(Literal_valueContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_literal_value
-	return p
-}
-
-func (*Literal_valueContext) IsLiteral_valueContext() {}
-
-func NewLiteral_valueContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Literal_valueContext {
-	var p = new(Literal_valueContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_literal_value
-
-	return p
-}
-
-func (s *Literal_valueContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Literal_valueContext) NUMERIC_LITERAL() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserNUMERIC_LITERAL, 0)
-}
-
-func (s *Literal_valueContext) STRING_LITERAL() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserSTRING_LITERAL, 0)
-}
-
-func (s *Literal_valueContext) BLOB_LITERAL() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserBLOB_LITERAL, 0)
-}
-
-func (s *Literal_valueContext) NULL_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserNULL_, 0)
-}
-
-func (s *Literal_valueContext) TRUE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserTRUE_, 0)
-}
-
-func (s *Literal_valueContext) FALSE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserFALSE_, 0)
-}
-
-func (s *Literal_valueContext) CURRENT_TIME_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCURRENT_TIME_, 0)
-}
-
-func (s *Literal_valueContext) CURRENT_DATE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCURRENT_DATE_, 0)
-}
-
-func (s *Literal_valueContext) CURRENT_TIMESTAMP_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCURRENT_TIMESTAMP_, 0)
-}
-
-func (s *Literal_valueContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Literal_valueContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Literal_valueContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterLiteral_value(s)
-	}
-}
-
-func (s *Literal_valueContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitLiteral_value(s)
-	}
-}
-
-func (p *SQLiteParser) Literal_value() (localctx ILiteral_valueContext) {
-	this := p
-	_ = this
-
-	localctx = NewLiteral_valueContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 68, SQLiteParserRULE_literal_value)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1098)
-		_la = p.GetTokenStream().LA(1)
-
-		if !(((int64((_la-52)) & ^0x3f) == 0 && ((int64(1)<<(_la-52))&4503599627370503) != 0) || ((int64((_la-172)) & ^0x3f) == 0 && ((int64(1)<<(_la-172))&212995) != 0)) {
-			p.GetErrorHandler().RecoverInline(p)
-		} else {
-			p.GetErrorHandler().ReportMatch(p)
-			p.Consume()
-		}
-	}
-
-	return localctx
-}
-
-// IValue_rowContext is an interface to support dynamic dispatch.
-type IValue_rowContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	OPEN_PAR() antlr.TerminalNode
-	AllExpr() []IExprContext
-	Expr(i int) IExprContext
-	CLOSE_PAR() antlr.TerminalNode
-	AllCOMMA() []antlr.TerminalNode
-	COMMA(i int) antlr.TerminalNode
-
-	// IsValue_rowContext differentiates from other interfaces.
-	IsValue_rowContext()
-}
-
-type Value_rowContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyValue_rowContext() *Value_rowContext {
-	var p = new(Value_rowContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_value_row
-	return p
-}
-
-func (*Value_rowContext) IsValue_rowContext() {}
-
-func NewValue_rowContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Value_rowContext {
-	var p = new(Value_rowContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_value_row
-
-	return p
-}
-
-func (s *Value_rowContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Value_rowContext) OPEN_PAR() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserOPEN_PAR, 0)
-}
-
-func (s *Value_rowContext) AllExpr() []IExprContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IExprContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IExprContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IExprContext); ok {
-			tst[i] = t.(IExprContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Value_rowContext) Expr(i int) IExprContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IExprContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IExprContext)
-}
-
-func (s *Value_rowContext) CLOSE_PAR() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCLOSE_PAR, 0)
-}
-
-func (s *Value_rowContext) AllCOMMA() []antlr.TerminalNode {
-	return s.GetTokens(SQLiteParserCOMMA)
-}
-
-func (s *Value_rowContext) COMMA(i int) antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCOMMA, i)
-}
-
-func (s *Value_rowContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Value_rowContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Value_rowContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterValue_row(s)
-	}
-}
-
-func (s *Value_rowContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitValue_row(s)
-	}
-}
-
-func (p *SQLiteParser) Value_row() (localctx IValue_rowContext) {
-	this := p
-	_ = this
-
-	localctx = NewValue_rowContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 70, SQLiteParserRULE_value_row)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1100)
-		p.Match(SQLiteParserOPEN_PAR)
-	}
-	{
-		p.SetState(1101)
-		p.expr(0)
-	}
-	p.SetState(1106)
-	p.GetErrorHandler().Sync(p)
-	_la = p.GetTokenStream().LA(1)
-
-	for _la == SQLiteParserCOMMA {
-		{
-			p.SetState(1102)
-			p.Match(SQLiteParserCOMMA)
-		}
-		{
-			p.SetState(1103)
-			p.expr(0)
-		}
-
-		p.SetState(1108)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-	}
-	{
-		p.SetState(1109)
-		p.Match(SQLiteParserCLOSE_PAR)
-	}
-
-	return localctx
-}
-
-// IValues_clauseContext is an interface to support dynamic dispatch.
-type IValues_clauseContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	VALUES_() antlr.TerminalNode
-	AllValue_row() []IValue_rowContext
-	Value_row(i int) IValue_rowContext
-	AllCOMMA() []antlr.TerminalNode
-	COMMA(i int) antlr.TerminalNode
-
-	// IsValues_clauseContext differentiates from other interfaces.
-	IsValues_clauseContext()
-}
-
-type Values_clauseContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyValues_clauseContext() *Values_clauseContext {
-	var p = new(Values_clauseContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_values_clause
-	return p
-}
-
-func (*Values_clauseContext) IsValues_clauseContext() {}
-
-func NewValues_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Values_clauseContext {
-	var p = new(Values_clauseContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_values_clause
-
-	return p
-}
-
-func (s *Values_clauseContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Values_clauseContext) VALUES_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserVALUES_, 0)
-}
-
-func (s *Values_clauseContext) AllValue_row() []IValue_rowContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IValue_rowContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IValue_rowContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IValue_rowContext); ok {
-			tst[i] = t.(IValue_rowContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Values_clauseContext) Value_row(i int) IValue_rowContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IValue_rowContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IValue_rowContext)
-}
-
-func (s *Values_clauseContext) AllCOMMA() []antlr.TerminalNode {
-	return s.GetTokens(SQLiteParserCOMMA)
-}
-
-func (s *Values_clauseContext) COMMA(i int) antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCOMMA, i)
-}
-
-func (s *Values_clauseContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Values_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Values_clauseContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterValues_clause(s)
-	}
-}
-
-func (s *Values_clauseContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitValues_clause(s)
-	}
-}
-
-func (p *SQLiteParser) Values_clause() (localctx IValues_clauseContext) {
-	this := p
-	_ = this
-
-	localctx = NewValues_clauseContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 72, SQLiteParserRULE_values_clause)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1111)
-		p.Match(SQLiteParserVALUES_)
-	}
-	{
-		p.SetState(1112)
-		p.Value_row()
-	}
-	p.SetState(1117)
-	p.GetErrorHandler().Sync(p)
-	_la = p.GetTokenStream().LA(1)
-
-	for _la == SQLiteParserCOMMA {
-		{
-			p.SetState(1113)
-			p.Match(SQLiteParserCOMMA)
-		}
-		{
-			p.SetState(1114)
-			p.Value_row()
-		}
-
-		p.SetState(1119)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-	}
-
-	return localctx
-}
-
-// IInsert_stmtContext is an interface to support dynamic dispatch.
-type IInsert_stmtContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	INTO_() antlr.TerminalNode
-	Table_name() ITable_nameContext
-	INSERT_() antlr.TerminalNode
-	REPLACE_() antlr.TerminalNode
-	OR_() antlr.TerminalNode
-	DEFAULT_() antlr.TerminalNode
-	VALUES_() antlr.TerminalNode
-	With_clause() IWith_clauseContext
-	ROLLBACK_() antlr.TerminalNode
-	ABORT_() antlr.TerminalNode
-	FAIL_() antlr.TerminalNode
-	IGNORE_() antlr.TerminalNode
-	Schema_name() ISchema_nameContext
-	DOT() antlr.TerminalNode
-	AS_() antlr.TerminalNode
-	Table_alias() ITable_aliasContext
-	OPEN_PAR() antlr.TerminalNode
-	AllColumn_name() []IColumn_nameContext
-	Column_name(i int) IColumn_nameContext
-	CLOSE_PAR() antlr.TerminalNode
-	Returning_clause() IReturning_clauseContext
-	AllCOMMA() []antlr.TerminalNode
-	COMMA(i int) antlr.TerminalNode
-	Values_clause() IValues_clauseContext
-	Select_stmt() ISelect_stmtContext
-	Upsert_clause() IUpsert_clauseContext
-
-	// IsInsert_stmtContext differentiates from other interfaces.
-	IsInsert_stmtContext()
-}
-
-type Insert_stmtContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyInsert_stmtContext() *Insert_stmtContext {
-	var p = new(Insert_stmtContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_insert_stmt
-	return p
-}
-
-func (*Insert_stmtContext) IsInsert_stmtContext() {}
-
-func NewInsert_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Insert_stmtContext {
-	var p = new(Insert_stmtContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_insert_stmt
-
-	return p
-}
-
-func (s *Insert_stmtContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Insert_stmtContext) INTO_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserINTO_, 0)
-}
-
-func (s *Insert_stmtContext) Table_name() ITable_nameContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ITable_nameContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(ITable_nameContext)
-}
-
-func (s *Insert_stmtContext) INSERT_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserINSERT_, 0)
-}
-
-func (s *Insert_stmtContext) REPLACE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserREPLACE_, 0)
-}
-
-func (s *Insert_stmtContext) OR_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserOR_, 0)
-}
-
-func (s *Insert_stmtContext) DEFAULT_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserDEFAULT_, 0)
-}
-
-func (s *Insert_stmtContext) VALUES_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserVALUES_, 0)
-}
-
-func (s *Insert_stmtContext) With_clause() IWith_clauseContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IWith_clauseContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IWith_clauseContext)
-}
-
-func (s *Insert_stmtContext) ROLLBACK_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserROLLBACK_, 0)
-}
-
-func (s *Insert_stmtContext) ABORT_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserABORT_, 0)
-}
-
-func (s *Insert_stmtContext) FAIL_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserFAIL_, 0)
-}
-
-func (s *Insert_stmtContext) IGNORE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserIGNORE_, 0)
-}
-
-func (s *Insert_stmtContext) Schema_name() ISchema_nameContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ISchema_nameContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(ISchema_nameContext)
-}
-
-func (s *Insert_stmtContext) DOT() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserDOT, 0)
-}
-
-func (s *Insert_stmtContext) AS_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserAS_, 0)
-}
-
-func (s *Insert_stmtContext) Table_alias() ITable_aliasContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ITable_aliasContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(ITable_aliasContext)
-}
-
-func (s *Insert_stmtContext) OPEN_PAR() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserOPEN_PAR, 0)
-}
-
-func (s *Insert_stmtContext) AllColumn_name() []IColumn_nameContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IColumn_nameContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IColumn_nameContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IColumn_nameContext); ok {
-			tst[i] = t.(IColumn_nameContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Insert_stmtContext) Column_name(i int) IColumn_nameContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IColumn_nameContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IColumn_nameContext)
-}
-
-func (s *Insert_stmtContext) CLOSE_PAR() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCLOSE_PAR, 0)
-}
-
-func (s *Insert_stmtContext) Returning_clause() IReturning_clauseContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IReturning_clauseContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IReturning_clauseContext)
-}
-
-func (s *Insert_stmtContext) AllCOMMA() []antlr.TerminalNode {
-	return s.GetTokens(SQLiteParserCOMMA)
-}
-
-func (s *Insert_stmtContext) COMMA(i int) antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCOMMA, i)
-}
-
-func (s *Insert_stmtContext) Values_clause() IValues_clauseContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IValues_clauseContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IValues_clauseContext)
-}
-
-func (s *Insert_stmtContext) Select_stmt() ISelect_stmtContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ISelect_stmtContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(ISelect_stmtContext)
-}
-
-func (s *Insert_stmtContext) Upsert_clause() IUpsert_clauseContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IUpsert_clauseContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IUpsert_clauseContext)
-}
-
-func (s *Insert_stmtContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Insert_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Insert_stmtContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterInsert_stmt(s)
-	}
-}
-
-func (s *Insert_stmtContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitInsert_stmt(s)
-	}
-}
-
-func (p *SQLiteParser) Insert_stmt() (localctx IInsert_stmtContext) {
-	this := p
-	_ = this
-
-	localctx = NewInsert_stmtContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 74, SQLiteParserRULE_insert_stmt)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	p.SetState(1121)
-	p.GetErrorHandler().Sync(p)
-	_la = p.GetTokenStream().LA(1)
-
-	if _la == SQLiteParserWITH_ {
-		{
-			p.SetState(1120)
-			p.With_clause()
-		}
-
-	}
-	p.SetState(1128)
-	p.GetErrorHandler().Sync(p)
-	switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 142, p.GetParserRuleContext()) {
-	case 1:
-		{
-			p.SetState(1123)
-			p.Match(SQLiteParserINSERT_)
-		}
-
-	case 2:
-		{
-			p.SetState(1124)
-			p.Match(SQLiteParserREPLACE_)
-		}
-
-	case 3:
-		{
-			p.SetState(1125)
-			p.Match(SQLiteParserINSERT_)
-		}
-		{
-			p.SetState(1126)
-			p.Match(SQLiteParserOR_)
-		}
-		{
-			p.SetState(1127)
-			_la = p.GetTokenStream().LA(1)
-
-			if !(_la == SQLiteParserABORT_ || ((int64((_la-72)) & ^0x3f) == 0 && ((int64(1)<<(_la-72))&19140298416325121) != 0)) {
-				p.GetErrorHandler().RecoverInline(p)
-			} else {
-				p.GetErrorHandler().ReportMatch(p)
-				p.Consume()
-			}
-		}
-
-	}
-	{
-		p.SetState(1130)
-		p.Match(SQLiteParserINTO_)
-	}
-	p.SetState(1134)
-	p.GetErrorHandler().Sync(p)
-
-	if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 143, p.GetParserRuleContext()) == 1 {
-		{
-			p.SetState(1131)
-			p.Schema_name()
-		}
-		{
-			p.SetState(1132)
-			p.Match(SQLiteParserDOT)
-		}
-
-	}
-	{
-		p.SetState(1136)
-		p.Table_name()
-	}
-	p.SetState(1139)
-	p.GetErrorHandler().Sync(p)
-	_la = p.GetTokenStream().LA(1)
-
-	if _la == SQLiteParserAS_ {
-		{
-			p.SetState(1137)
-			p.Match(SQLiteParserAS_)
-		}
-		{
-			p.SetState(1138)
-			p.Table_alias()
-		}
-
-	}
-	p.SetState(1152)
-	p.GetErrorHandler().Sync(p)
-	_la = p.GetTokenStream().LA(1)
-
-	if _la == SQLiteParserOPEN_PAR {
-		{
-			p.SetState(1141)
-			p.Match(SQLiteParserOPEN_PAR)
-		}
-		{
-			p.SetState(1142)
-			p.Column_name()
-		}
-		p.SetState(1147)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-
-		for _la == SQLiteParserCOMMA {
-			{
-				p.SetState(1143)
-				p.Match(SQLiteParserCOMMA)
-			}
-			{
-				p.SetState(1144)
-				p.Column_name()
-			}
-
-			p.SetState(1149)
-			p.GetErrorHandler().Sync(p)
-			_la = p.GetTokenStream().LA(1)
-		}
-		{
-			p.SetState(1150)
-			p.Match(SQLiteParserCLOSE_PAR)
-		}
-
-	}
-	p.SetState(1163)
-	p.GetErrorHandler().Sync(p)
-
-	switch p.GetTokenStream().LA(1) {
-	case SQLiteParserSELECT_, SQLiteParserVALUES_, SQLiteParserWITH_:
-		p.SetState(1156)
-		p.GetErrorHandler().Sync(p)
-		switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 147, p.GetParserRuleContext()) {
-		case 1:
-			{
-				p.SetState(1154)
-				p.Values_clause()
-			}
-
-		case 2:
-			{
-				p.SetState(1155)
-				p.Select_stmt()
-			}
-
-		}
-		p.SetState(1159)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-
-		if _la == SQLiteParserON_ {
-			{
-				p.SetState(1158)
-				p.Upsert_clause()
-			}
-
-		}
-
-	case SQLiteParserDEFAULT_:
-		{
-			p.SetState(1161)
-			p.Match(SQLiteParserDEFAULT_)
-		}
-		{
-			p.SetState(1162)
-			p.Match(SQLiteParserVALUES_)
-		}
-
-	default:
-		panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
-	}
-	p.SetState(1166)
-	p.GetErrorHandler().Sync(p)
-	_la = p.GetTokenStream().LA(1)
-
-	if _la == SQLiteParserRETURNING_ {
-		{
-			p.SetState(1165)
-			p.Returning_clause()
-		}
-
-	}
-
-	return localctx
-}
-
-// IReturning_clauseContext is an interface to support dynamic dispatch.
-type IReturning_clauseContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	RETURNING_() antlr.TerminalNode
-	AllResult_column() []IResult_columnContext
-	Result_column(i int) IResult_columnContext
-	AllCOMMA() []antlr.TerminalNode
-	COMMA(i int) antlr.TerminalNode
-
-	// IsReturning_clauseContext differentiates from other interfaces.
-	IsReturning_clauseContext()
-}
-
-type Returning_clauseContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyReturning_clauseContext() *Returning_clauseContext {
-	var p = new(Returning_clauseContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_returning_clause
-	return p
-}
-
-func (*Returning_clauseContext) IsReturning_clauseContext() {}
-
-func NewReturning_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Returning_clauseContext {
-	var p = new(Returning_clauseContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_returning_clause
-
-	return p
-}
-
-func (s *Returning_clauseContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Returning_clauseContext) RETURNING_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserRETURNING_, 0)
-}
-
-func (s *Returning_clauseContext) AllResult_column() []IResult_columnContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IResult_columnContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IResult_columnContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IResult_columnContext); ok {
-			tst[i] = t.(IResult_columnContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Returning_clauseContext) Result_column(i int) IResult_columnContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IResult_columnContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IResult_columnContext)
-}
-
-func (s *Returning_clauseContext) AllCOMMA() []antlr.TerminalNode {
-	return s.GetTokens(SQLiteParserCOMMA)
-}
-
-func (s *Returning_clauseContext) COMMA(i int) antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCOMMA, i)
-}
-
-func (s *Returning_clauseContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Returning_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Returning_clauseContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterReturning_clause(s)
-	}
-}
-
-func (s *Returning_clauseContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitReturning_clause(s)
-	}
-}
-
-func (p *SQLiteParser) Returning_clause() (localctx IReturning_clauseContext) {
-	this := p
-	_ = this
-
-	localctx = NewReturning_clauseContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 76, SQLiteParserRULE_returning_clause)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1168)
-		p.Match(SQLiteParserRETURNING_)
-	}
-	{
-		p.SetState(1169)
-		p.Result_column()
-	}
-	p.SetState(1174)
-	p.GetErrorHandler().Sync(p)
-	_la = p.GetTokenStream().LA(1)
-
-	for _la == SQLiteParserCOMMA {
-		{
-			p.SetState(1170)
-			p.Match(SQLiteParserCOMMA)
-		}
-		{
-			p.SetState(1171)
-			p.Result_column()
-		}
-
-		p.SetState(1176)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-	}
-
-	return localctx
-}
-
-// IUpsert_clauseContext is an interface to support dynamic dispatch.
-type IUpsert_clauseContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	ON_() antlr.TerminalNode
-	CONFLICT_() antlr.TerminalNode
-	DO_() antlr.TerminalNode
-	NOTHING_() antlr.TerminalNode
-	UPDATE_() antlr.TerminalNode
-	SET_() antlr.TerminalNode
-	OPEN_PAR() antlr.TerminalNode
-	AllIndexed_column() []IIndexed_columnContext
-	Indexed_column(i int) IIndexed_columnContext
-	CLOSE_PAR() antlr.TerminalNode
-	AllASSIGN() []antlr.TerminalNode
-	ASSIGN(i int) antlr.TerminalNode
-	AllExpr() []IExprContext
-	Expr(i int) IExprContext
-	AllCOMMA() []antlr.TerminalNode
-	COMMA(i int) antlr.TerminalNode
-	AllWHERE_() []antlr.TerminalNode
-	WHERE_(i int) antlr.TerminalNode
-	AllColumn_name() []IColumn_nameContext
-	Column_name(i int) IColumn_nameContext
-	AllColumn_name_list() []IColumn_name_listContext
-	Column_name_list(i int) IColumn_name_listContext
-
-	// IsUpsert_clauseContext differentiates from other interfaces.
-	IsUpsert_clauseContext()
-}
-
-type Upsert_clauseContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyUpsert_clauseContext() *Upsert_clauseContext {
-	var p = new(Upsert_clauseContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_upsert_clause
-	return p
-}
-
-func (*Upsert_clauseContext) IsUpsert_clauseContext() {}
-
-func NewUpsert_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Upsert_clauseContext {
-	var p = new(Upsert_clauseContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_upsert_clause
-
-	return p
-}
-
-func (s *Upsert_clauseContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Upsert_clauseContext) ON_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserON_, 0)
-}
-
-func (s *Upsert_clauseContext) CONFLICT_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCONFLICT_, 0)
-}
-
-func (s *Upsert_clauseContext) DO_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserDO_, 0)
-}
-
-func (s *Upsert_clauseContext) NOTHING_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserNOTHING_, 0)
-}
-
-func (s *Upsert_clauseContext) UPDATE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserUPDATE_, 0)
-}
-
-func (s *Upsert_clauseContext) SET_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserSET_, 0)
-}
-
-func (s *Upsert_clauseContext) OPEN_PAR() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserOPEN_PAR, 0)
-}
-
-func (s *Upsert_clauseContext) AllIndexed_column() []IIndexed_columnContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IIndexed_columnContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IIndexed_columnContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IIndexed_columnContext); ok {
-			tst[i] = t.(IIndexed_columnContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Upsert_clauseContext) Indexed_column(i int) IIndexed_columnContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IIndexed_columnContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IIndexed_columnContext)
-}
-
-func (s *Upsert_clauseContext) CLOSE_PAR() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCLOSE_PAR, 0)
-}
-
-func (s *Upsert_clauseContext) AllASSIGN() []antlr.TerminalNode {
-	return s.GetTokens(SQLiteParserASSIGN)
-}
-
-func (s *Upsert_clauseContext) ASSIGN(i int) antlr.TerminalNode {
-	return s.GetToken(SQLiteParserASSIGN, i)
-}
-
-func (s *Upsert_clauseContext) AllExpr() []IExprContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IExprContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IExprContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IExprContext); ok {
-			tst[i] = t.(IExprContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Upsert_clauseContext) Expr(i int) IExprContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IExprContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IExprContext)
-}
-
-func (s *Upsert_clauseContext) AllCOMMA() []antlr.TerminalNode {
-	return s.GetTokens(SQLiteParserCOMMA)
-}
-
-func (s *Upsert_clauseContext) COMMA(i int) antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCOMMA, i)
-}
-
-func (s *Upsert_clauseContext) AllWHERE_() []antlr.TerminalNode {
-	return s.GetTokens(SQLiteParserWHERE_)
-}
-
-func (s *Upsert_clauseContext) WHERE_(i int) antlr.TerminalNode {
-	return s.GetToken(SQLiteParserWHERE_, i)
-}
-
-func (s *Upsert_clauseContext) AllColumn_name() []IColumn_nameContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IColumn_nameContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IColumn_nameContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IColumn_nameContext); ok {
-			tst[i] = t.(IColumn_nameContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Upsert_clauseContext) Column_name(i int) IColumn_nameContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IColumn_nameContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IColumn_nameContext)
-}
-
-func (s *Upsert_clauseContext) AllColumn_name_list() []IColumn_name_listContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IColumn_name_listContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IColumn_name_listContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IColumn_name_listContext); ok {
-			tst[i] = t.(IColumn_name_listContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Upsert_clauseContext) Column_name_list(i int) IColumn_name_listContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IColumn_name_listContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IColumn_name_listContext)
-}
-
-func (s *Upsert_clauseContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Upsert_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Upsert_clauseContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterUpsert_clause(s)
-	}
-}
-
-func (s *Upsert_clauseContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitUpsert_clause(s)
-	}
-}
-
-func (p *SQLiteParser) Upsert_clause() (localctx IUpsert_clauseContext) {
-	this := p
-	_ = this
-
-	localctx = NewUpsert_clauseContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 78, SQLiteParserRULE_upsert_clause)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1177)
-		p.Match(SQLiteParserON_)
-	}
-	{
-		p.SetState(1178)
-		p.Match(SQLiteParserCONFLICT_)
-	}
-	p.SetState(1193)
-	p.GetErrorHandler().Sync(p)
-	_la = p.GetTokenStream().LA(1)
-
-	if _la == SQLiteParserOPEN_PAR {
-		{
-			p.SetState(1179)
-			p.Match(SQLiteParserOPEN_PAR)
-		}
-		{
-			p.SetState(1180)
-			p.Indexed_column()
-		}
-		p.SetState(1185)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-
-		for _la == SQLiteParserCOMMA {
-			{
-				p.SetState(1181)
-				p.Match(SQLiteParserCOMMA)
-			}
-			{
-				p.SetState(1182)
-				p.Indexed_column()
-			}
-
-			p.SetState(1187)
-			p.GetErrorHandler().Sync(p)
-			_la = p.GetTokenStream().LA(1)
-		}
-		{
-			p.SetState(1188)
-			p.Match(SQLiteParserCLOSE_PAR)
-		}
-		p.SetState(1191)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-
-		if _la == SQLiteParserWHERE_ {
-			{
-				p.SetState(1189)
-				p.Match(SQLiteParserWHERE_)
-			}
-			{
-				p.SetState(1190)
-				p.expr(0)
-			}
-
-		}
-
-	}
-	{
-		p.SetState(1195)
-		p.Match(SQLiteParserDO_)
-	}
-	p.SetState(1222)
-	p.GetErrorHandler().Sync(p)
-
-	switch p.GetTokenStream().LA(1) {
-	case SQLiteParserNOTHING_:
-		{
-			p.SetState(1196)
-			p.Match(SQLiteParserNOTHING_)
-		}
-
-	case SQLiteParserUPDATE_:
-		{
-			p.SetState(1197)
-			p.Match(SQLiteParserUPDATE_)
-		}
-		{
-			p.SetState(1198)
-			p.Match(SQLiteParserSET_)
-		}
-
-		p.SetState(1201)
-		p.GetErrorHandler().Sync(p)
-		switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 155, p.GetParserRuleContext()) {
-		case 1:
-			{
-				p.SetState(1199)
-				p.Column_name()
-			}
-
-		case 2:
-			{
-				p.SetState(1200)
-				p.Column_name_list()
-			}
-
-		}
-		{
-			p.SetState(1203)
-			p.Match(SQLiteParserASSIGN)
-		}
-		{
-			p.SetState(1204)
-			p.expr(0)
-		}
-		p.SetState(1215)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-
-		for _la == SQLiteParserCOMMA {
-			{
-				p.SetState(1205)
-				p.Match(SQLiteParserCOMMA)
-			}
-			p.SetState(1208)
-			p.GetErrorHandler().Sync(p)
-			switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 156, p.GetParserRuleContext()) {
-			case 1:
-				{
-					p.SetState(1206)
-					p.Column_name()
-				}
-
-			case 2:
-				{
-					p.SetState(1207)
-					p.Column_name_list()
-				}
-
-			}
-			{
-				p.SetState(1210)
-				p.Match(SQLiteParserASSIGN)
-			}
-			{
-				p.SetState(1211)
-				p.expr(0)
-			}
-
-			p.SetState(1217)
-			p.GetErrorHandler().Sync(p)
-			_la = p.GetTokenStream().LA(1)
-		}
-		p.SetState(1220)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-
-		if _la == SQLiteParserWHERE_ {
-			{
-				p.SetState(1218)
-				p.Match(SQLiteParserWHERE_)
-			}
-			{
-				p.SetState(1219)
-				p.expr(0)
-			}
-
-		}
-
-	default:
-		panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
-	}
-
-	return localctx
-}
-
-// IPragma_stmtContext is an interface to support dynamic dispatch.
-type IPragma_stmtContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
- GetParser() antlr.Parser - - // Getter signatures - PRAGMA_() antlr.TerminalNode - Pragma_name() IPragma_nameContext - Schema_name() ISchema_nameContext - DOT() antlr.TerminalNode - ASSIGN() antlr.TerminalNode - Pragma_value() IPragma_valueContext - OPEN_PAR() antlr.TerminalNode - CLOSE_PAR() antlr.TerminalNode - - // IsPragma_stmtContext differentiates from other interfaces. - IsPragma_stmtContext() -} - -type Pragma_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyPragma_stmtContext() *Pragma_stmtContext { - var p = new(Pragma_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_pragma_stmt - return p -} - -func (*Pragma_stmtContext) IsPragma_stmtContext() {} - -func NewPragma_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Pragma_stmtContext { - var p = new(Pragma_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_pragma_stmt - - return p -} - -func (s *Pragma_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Pragma_stmtContext) PRAGMA_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPRAGMA_, 0) -} - -func (s *Pragma_stmtContext) Pragma_name() IPragma_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IPragma_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IPragma_nameContext) -} - -func (s *Pragma_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Pragma_stmtContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Pragma_stmtContext) ASSIGN() antlr.TerminalNode { - return s.GetToken(SQLiteParserASSIGN, 0) -} - -func (s *Pragma_stmtContext) Pragma_value() IPragma_valueContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IPragma_valueContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IPragma_valueContext) -} - -func (s *Pragma_stmtContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Pragma_stmtContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Pragma_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Pragma_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Pragma_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterPragma_stmt(s) - } -} - -func (s *Pragma_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitPragma_stmt(s) - } -} - -func (p *SQLiteParser) Pragma_stmt() (localctx IPragma_stmtContext) { - this := p - _ = this - - localctx = NewPragma_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 80, SQLiteParserRULE_pragma_stmt) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok 
:= err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1224) - p.Match(SQLiteParserPRAGMA_) - } - p.SetState(1228) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 160, p.GetParserRuleContext()) == 1 { - { - p.SetState(1225) - p.Schema_name() - } - { - p.SetState(1226) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(1230) - p.Pragma_name() - } - p.SetState(1237) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserASSIGN: - { - p.SetState(1231) - p.Match(SQLiteParserASSIGN) - } - { - p.SetState(1232) - p.Pragma_value() - } - - case SQLiteParserOPEN_PAR: - { - p.SetState(1233) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1234) - p.Pragma_value() - } - { - p.SetState(1235) - p.Match(SQLiteParserCLOSE_PAR) - } - - case SQLiteParserEOF, SQLiteParserSCOL, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserATTACH_, SQLiteParserBEGIN_, SQLiteParserCOMMIT_, SQLiteParserCREATE_, SQLiteParserDELETE_, SQLiteParserDETACH_, SQLiteParserDROP_, SQLiteParserEND_, SQLiteParserEXPLAIN_, SQLiteParserINSERT_, SQLiteParserPRAGMA_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserREPLACE_, SQLiteParserROLLBACK_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserUPDATE_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserWITH_: - - default: - } - - return localctx -} - -// IPragma_valueContext is an interface to support dynamic dispatch. -type IPragma_valueContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Signed_number() ISigned_numberContext - Name() INameContext - STRING_LITERAL() antlr.TerminalNode - - // IsPragma_valueContext differentiates from other interfaces. 
- IsPragma_valueContext() -} - -type Pragma_valueContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyPragma_valueContext() *Pragma_valueContext { - var p = new(Pragma_valueContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_pragma_value - return p -} - -func (*Pragma_valueContext) IsPragma_valueContext() {} - -func NewPragma_valueContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Pragma_valueContext { - var p = new(Pragma_valueContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_pragma_value - - return p -} - -func (s *Pragma_valueContext) GetParser() antlr.Parser { return s.parser } - -func (s *Pragma_valueContext) Signed_number() ISigned_numberContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISigned_numberContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISigned_numberContext) -} - -func (s *Pragma_valueContext) Name() INameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(INameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(INameContext) -} - -func (s *Pragma_valueContext) STRING_LITERAL() antlr.TerminalNode { - return s.GetToken(SQLiteParserSTRING_LITERAL, 0) -} - -func (s *Pragma_valueContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Pragma_valueContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Pragma_valueContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterPragma_value(s) - } -} - -func (s *Pragma_valueContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitPragma_value(s) - } -} - -func (p *SQLiteParser) Pragma_value() (localctx IPragma_valueContext) { - this := p - _ = this - - localctx = NewPragma_valueContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 82, SQLiteParserRULE_pragma_value) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(1242) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 162, p.GetParserRuleContext()) { - case 1: - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1239) - p.Signed_number() - } - - case 2: - p.EnterOuterAlt(localctx, 2) - { - p.SetState(1240) - p.Name() - } - - case 3: - p.EnterOuterAlt(localctx, 3) - { - p.SetState(1241) - p.Match(SQLiteParserSTRING_LITERAL) - } - - } - - return localctx -} - -// IReindex_stmtContext is an interface to support dynamic dispatch. -type IReindex_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
- GetParser() antlr.Parser - - // Getter signatures - REINDEX_() antlr.TerminalNode - Collation_name() ICollation_nameContext - Table_name() ITable_nameContext - Index_name() IIndex_nameContext - Schema_name() ISchema_nameContext - DOT() antlr.TerminalNode - - // IsReindex_stmtContext differentiates from other interfaces. - IsReindex_stmtContext() -} - -type Reindex_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyReindex_stmtContext() *Reindex_stmtContext { - var p = new(Reindex_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_reindex_stmt - return p -} - -func (*Reindex_stmtContext) IsReindex_stmtContext() {} - -func NewReindex_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Reindex_stmtContext { - var p = new(Reindex_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_reindex_stmt - - return p -} - -func (s *Reindex_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Reindex_stmtContext) REINDEX_() antlr.TerminalNode { - return s.GetToken(SQLiteParserREINDEX_, 0) -} - -func (s *Reindex_stmtContext) Collation_name() ICollation_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICollation_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICollation_nameContext) -} - -func (s *Reindex_stmtContext) Table_name() ITable_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *Reindex_stmtContext) Index_name() IIndex_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IIndex_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IIndex_nameContext) -} - -func (s *Reindex_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Reindex_stmtContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Reindex_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Reindex_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Reindex_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterReindex_stmt(s) - } -} - -func (s *Reindex_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitReindex_stmt(s) - } -} - -func (p *SQLiteParser) Reindex_stmt() (localctx IReindex_stmtContext) { - this := p - _ = this - - localctx = NewReindex_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 84, SQLiteParserRULE_reindex_stmt) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - 
localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1244) - p.Match(SQLiteParserREINDEX_) - } - p.SetState(1255) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 165, p.GetParserRuleContext()) == 1 { - { - p.SetState(1245) - p.Collation_name() - } - - } else if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 165, p.GetParserRuleContext()) == 2 { - p.SetState(1249) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 163, p.GetParserRuleContext()) == 1 { - { - p.SetState(1246) - p.Schema_name() - } - { - p.SetState(1247) - p.Match(SQLiteParserDOT) - } - - } - p.SetState(1253) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 164, p.GetParserRuleContext()) { - case 1: - { - p.SetState(1251) - p.Table_name() - } - - case 2: - { - p.SetState(1252) - p.Index_name() - } - - } - - } - - return localctx -} - -// ISelect_stmtContext is an interface to support dynamic dispatch. -type ISelect_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - AllSelect_core() []ISelect_coreContext - Select_core(i int) ISelect_coreContext - Common_table_stmt() ICommon_table_stmtContext - AllCompound_operator() []ICompound_operatorContext - Compound_operator(i int) ICompound_operatorContext - Order_by_stmt() IOrder_by_stmtContext - Limit_stmt() ILimit_stmtContext - - // IsSelect_stmtContext differentiates from other interfaces. - IsSelect_stmtContext() -} - -type Select_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptySelect_stmtContext() *Select_stmtContext { - var p = new(Select_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_select_stmt - return p -} - -func (*Select_stmtContext) IsSelect_stmtContext() {} - -func NewSelect_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Select_stmtContext { - var p = new(Select_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_select_stmt - - return p -} - -func (s *Select_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Select_stmtContext) AllSelect_core() []ISelect_coreContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ISelect_coreContext); ok { - len++ - } - } - - tst := make([]ISelect_coreContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ISelect_coreContext); ok { - tst[i] = t.(ISelect_coreContext) - i++ - } - } - - return tst -} - -func (s *Select_stmtContext) Select_core(i int) ISelect_coreContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISelect_coreContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ISelect_coreContext) -} - -func (s *Select_stmtContext) Common_table_stmt() ICommon_table_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICommon_table_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICommon_table_stmtContext) -} - -func 
(s *Select_stmtContext) AllCompound_operator() []ICompound_operatorContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ICompound_operatorContext); ok { - len++ - } - } - - tst := make([]ICompound_operatorContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ICompound_operatorContext); ok { - tst[i] = t.(ICompound_operatorContext) - i++ - } - } - - return tst -} - -func (s *Select_stmtContext) Compound_operator(i int) ICompound_operatorContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICompound_operatorContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ICompound_operatorContext) -} - -func (s *Select_stmtContext) Order_by_stmt() IOrder_by_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IOrder_by_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IOrder_by_stmtContext) -} - -func (s *Select_stmtContext) Limit_stmt() ILimit_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ILimit_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ILimit_stmtContext) -} - -func (s *Select_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Select_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Select_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterSelect_stmt(s) - } -} - -func (s *Select_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitSelect_stmt(s) - } -} - -func (p *SQLiteParser) Select_stmt() (localctx ISelect_stmtContext) { - this := p - _ = this - - localctx = NewSelect_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 86, SQLiteParserRULE_select_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - var _alt int - - p.EnterOuterAlt(localctx, 1) - p.SetState(1258) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWITH_ { - { - p.SetState(1257) - p.Common_table_stmt() - } - - } - { - p.SetState(1260) - p.Select_core() - } - p.SetState(1266) - p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 167, p.GetParserRuleContext()) - - for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { - if _alt == 1 { - { - p.SetState(1261) - p.Compound_operator() - } - { - p.SetState(1262) - p.Select_core() - } - - } - p.SetState(1268) - p.GetErrorHandler().Sync(p) - _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 167, p.GetParserRuleContext()) - } - p.SetState(1270) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserORDER_ { - { - p.SetState(1269) - p.Order_by_stmt() - } - - } - p.SetState(1273) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserLIMIT_ { - { - 
p.SetState(1272) - p.Limit_stmt() - } - - } - - return localctx -} - -// IJoin_clauseContext is an interface to support dynamic dispatch. -type IJoin_clauseContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - AllTable_or_subquery() []ITable_or_subqueryContext - Table_or_subquery(i int) ITable_or_subqueryContext - AllJoin_operator() []IJoin_operatorContext - Join_operator(i int) IJoin_operatorContext - AllJoin_constraint() []IJoin_constraintContext - Join_constraint(i int) IJoin_constraintContext - - // IsJoin_clauseContext differentiates from other interfaces. - IsJoin_clauseContext() -} - -type Join_clauseContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyJoin_clauseContext() *Join_clauseContext { - var p = new(Join_clauseContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_join_clause - return p -} - -func (*Join_clauseContext) IsJoin_clauseContext() {} - -func NewJoin_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Join_clauseContext { - var p = new(Join_clauseContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_join_clause - - return p -} - -func (s *Join_clauseContext) GetParser() antlr.Parser { return s.parser } - -func (s *Join_clauseContext) AllTable_or_subquery() []ITable_or_subqueryContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ITable_or_subqueryContext); ok { - len++ - } - } - - tst := make([]ITable_or_subqueryContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ITable_or_subqueryContext); ok { - tst[i] = t.(ITable_or_subqueryContext) - i++ - } - } - - return tst -} - -func (s *Join_clauseContext) Table_or_subquery(i int) ITable_or_subqueryContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_or_subqueryContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ITable_or_subqueryContext) -} - -func (s *Join_clauseContext) AllJoin_operator() []IJoin_operatorContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IJoin_operatorContext); ok { - len++ - } - } - - tst := make([]IJoin_operatorContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IJoin_operatorContext); ok { - tst[i] = t.(IJoin_operatorContext) - i++ - } - } - - return tst -} - -func (s *Join_clauseContext) Join_operator(i int) IJoin_operatorContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IJoin_operatorContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IJoin_operatorContext) -} - -func (s *Join_clauseContext) AllJoin_constraint() []IJoin_constraintContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IJoin_constraintContext); ok { - len++ - } - } - - tst := make([]IJoin_constraintContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IJoin_constraintContext); ok { - tst[i] = t.(IJoin_constraintContext) - i++ - } - } - - return tst -} - -func (s *Join_clauseContext) Join_constraint(i int) IJoin_constraintContext { - var t 
antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IJoin_constraintContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IJoin_constraintContext) -} - -func (s *Join_clauseContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Join_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Join_clauseContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterJoin_clause(s) - } -} - -func (s *Join_clauseContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitJoin_clause(s) - } -} - -func (p *SQLiteParser) Join_clause() (localctx IJoin_clauseContext) { - this := p - _ = this - - localctx = NewJoin_clauseContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 88, SQLiteParserRULE_join_clause) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1275) - p.Table_or_subquery() - } - p.SetState(1283) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA || _la == SQLiteParserCROSS_ || ((int64((_la-87)) & ^0x3f) == 0 && ((int64(1)<<(_la-87))&8833) != 0) { - { - p.SetState(1276) - p.Join_operator() - } - { - p.SetState(1277) - p.Table_or_subquery() - } - p.SetState(1279) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 170, p.GetParserRuleContext()) == 1 { - { - p.SetState(1278) - p.Join_constraint() - } - - } - - p.SetState(1285) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - return localctx -} - -// ISelect_coreContext is an interface to support dynamic dispatch. -type ISelect_coreContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // GetWhereExpr returns the whereExpr rule contexts. - GetWhereExpr() IExprContext - - // Get_expr returns the _expr rule contexts. - Get_expr() IExprContext - - // GetHavingExpr returns the havingExpr rule contexts. - GetHavingExpr() IExprContext - - // SetWhereExpr sets the whereExpr rule contexts. - SetWhereExpr(IExprContext) - - // Set_expr sets the _expr rule contexts. - Set_expr(IExprContext) - - // SetHavingExpr sets the havingExpr rule contexts. - SetHavingExpr(IExprContext) - - // GetGroupByExpr returns the groupByExpr rule context list. - GetGroupByExpr() []IExprContext - - // SetGroupByExpr sets the groupByExpr rule context list. 
- SetGroupByExpr([]IExprContext) - - // Getter signatures - SELECT_() antlr.TerminalNode - AllResult_column() []IResult_columnContext - Result_column(i int) IResult_columnContext - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - FROM_() antlr.TerminalNode - WHERE_() antlr.TerminalNode - GROUP_() antlr.TerminalNode - BY_() antlr.TerminalNode - WINDOW_() antlr.TerminalNode - AllWindow_name() []IWindow_nameContext - Window_name(i int) IWindow_nameContext - AllAS_() []antlr.TerminalNode - AS_(i int) antlr.TerminalNode - AllWindow_defn() []IWindow_defnContext - Window_defn(i int) IWindow_defnContext - DISTINCT_() antlr.TerminalNode - ALL_() antlr.TerminalNode - AllExpr() []IExprContext - Expr(i int) IExprContext - AllTable_or_subquery() []ITable_or_subqueryContext - Table_or_subquery(i int) ITable_or_subqueryContext - Join_clause() IJoin_clauseContext - HAVING_() antlr.TerminalNode - Values_clause() IValues_clauseContext - - // IsSelect_coreContext differentiates from other interfaces. - IsSelect_coreContext() -} - -type Select_coreContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser - whereExpr IExprContext - _expr IExprContext - groupByExpr []IExprContext - havingExpr IExprContext -} - -func NewEmptySelect_coreContext() *Select_coreContext { - var p = new(Select_coreContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_select_core - return p -} - -func (*Select_coreContext) IsSelect_coreContext() {} - -func NewSelect_coreContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Select_coreContext { - var p = new(Select_coreContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_select_core - - return p -} - -func (s *Select_coreContext) GetParser() antlr.Parser { return s.parser } - -func (s *Select_coreContext) GetWhereExpr() IExprContext { return s.whereExpr } - -func (s *Select_coreContext) Get_expr() IExprContext { return s._expr } - -func (s *Select_coreContext) GetHavingExpr() IExprContext { return s.havingExpr } - -func (s *Select_coreContext) SetWhereExpr(v IExprContext) { s.whereExpr = v } - -func (s *Select_coreContext) Set_expr(v IExprContext) { s._expr = v } - -func (s *Select_coreContext) SetHavingExpr(v IExprContext) { s.havingExpr = v } - -func (s *Select_coreContext) GetGroupByExpr() []IExprContext { return s.groupByExpr } - -func (s *Select_coreContext) SetGroupByExpr(v []IExprContext) { s.groupByExpr = v } - -func (s *Select_coreContext) SELECT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserSELECT_, 0) -} - -func (s *Select_coreContext) AllResult_column() []IResult_columnContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IResult_columnContext); ok { - len++ - } - } - - tst := make([]IResult_columnContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IResult_columnContext); ok { - tst[i] = t.(IResult_columnContext) - i++ - } - } - - return tst -} - -func (s *Select_coreContext) Result_column(i int) IResult_columnContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IResult_columnContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IResult_columnContext) -} - -func (s *Select_coreContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} 
- -func (s *Select_coreContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Select_coreContext) FROM_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFROM_, 0) -} - -func (s *Select_coreContext) WHERE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWHERE_, 0) -} - -func (s *Select_coreContext) GROUP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserGROUP_, 0) -} - -func (s *Select_coreContext) BY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserBY_, 0) -} - -func (s *Select_coreContext) WINDOW_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWINDOW_, 0) -} - -func (s *Select_coreContext) AllWindow_name() []IWindow_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IWindow_nameContext); ok { - len++ - } - } - - tst := make([]IWindow_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IWindow_nameContext); ok { - tst[i] = t.(IWindow_nameContext) - i++ - } - } - - return tst -} - -func (s *Select_coreContext) Window_name(i int) IWindow_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IWindow_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IWindow_nameContext) -} - -func (s *Select_coreContext) AllAS_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserAS_) -} - -func (s *Select_coreContext) AS_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, i) -} - -func (s *Select_coreContext) AllWindow_defn() []IWindow_defnContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IWindow_defnContext); ok { - len++ - } - } - - tst := make([]IWindow_defnContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IWindow_defnContext); ok { - tst[i] = t.(IWindow_defnContext) - i++ - } - } - - return tst -} - -func (s *Select_coreContext) Window_defn(i int) IWindow_defnContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IWindow_defnContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IWindow_defnContext) -} - -func (s *Select_coreContext) DISTINCT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDISTINCT_, 0) -} - -func (s *Select_coreContext) ALL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserALL_, 0) -} - -func (s *Select_coreContext) AllExpr() []IExprContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IExprContext); ok { - len++ - } - } - - tst := make([]IExprContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IExprContext); ok { - tst[i] = t.(IExprContext) - i++ - } - } - - return tst -} - -func (s *Select_coreContext) Expr(i int) IExprContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Select_coreContext) AllTable_or_subquery() []ITable_or_subqueryContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ITable_or_subqueryContext); ok { - len++ - } - } - - tst := make([]ITable_or_subqueryContext, len) - i := 0 - for _, ctx := range 
children { - if t, ok := ctx.(ITable_or_subqueryContext); ok { - tst[i] = t.(ITable_or_subqueryContext) - i++ - } - } - - return tst -} - -func (s *Select_coreContext) Table_or_subquery(i int) ITable_or_subqueryContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_or_subqueryContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ITable_or_subqueryContext) -} - -func (s *Select_coreContext) Join_clause() IJoin_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IJoin_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IJoin_clauseContext) -} - -func (s *Select_coreContext) HAVING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserHAVING_, 0) -} - -func (s *Select_coreContext) Values_clause() IValues_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IValues_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IValues_clauseContext) -} - -func (s *Select_coreContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Select_coreContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Select_coreContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterSelect_core(s) - } -} - -func (s *Select_coreContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitSelect_core(s) - } -} - -func (p *SQLiteParser) Select_core() (localctx ISelect_coreContext) { - this := p - _ = this - - localctx = NewSelect_coreContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 90, SQLiteParserRULE_select_core) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(1349) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserSELECT_: - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1286) - p.Match(SQLiteParserSELECT_) - } - p.SetState(1288) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 172, p.GetParserRuleContext()) == 1 { - { - p.SetState(1287) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserALL_ || _la == SQLiteParserDISTINCT_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - } - { - p.SetState(1290) - p.Result_column() - } - p.SetState(1295) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1291) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1292) - p.Result_column() - } - - p.SetState(1297) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - p.SetState(1310) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserFROM_ { - { - p.SetState(1298) - p.Match(SQLiteParserFROM_) - } - p.SetState(1308) - p.GetErrorHandler().Sync(p) - switch 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 175, p.GetParserRuleContext()) { - case 1: - { - p.SetState(1299) - p.Table_or_subquery() - } - p.SetState(1304) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1300) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1301) - p.Table_or_subquery() - } - - p.SetState(1306) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - case 2: - { - p.SetState(1307) - p.Join_clause() - } - - } - - } - p.SetState(1314) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWHERE_ { - { - p.SetState(1312) - p.Match(SQLiteParserWHERE_) - } - { - p.SetState(1313) - - var _x = p.expr(0) - - localctx.(*Select_coreContext).whereExpr = _x - } - - } - p.SetState(1330) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserGROUP_ { - { - p.SetState(1316) - p.Match(SQLiteParserGROUP_) - } - { - p.SetState(1317) - p.Match(SQLiteParserBY_) - } - { - p.SetState(1318) - - var _x = p.expr(0) - - localctx.(*Select_coreContext)._expr = _x - } - localctx.(*Select_coreContext).groupByExpr = append(localctx.(*Select_coreContext).groupByExpr, localctx.(*Select_coreContext)._expr) - p.SetState(1323) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1319) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1320) - - var _x = p.expr(0) - - localctx.(*Select_coreContext)._expr = _x - } - localctx.(*Select_coreContext).groupByExpr = append(localctx.(*Select_coreContext).groupByExpr, localctx.(*Select_coreContext)._expr) - - p.SetState(1325) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - p.SetState(1328) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserHAVING_ { - { - p.SetState(1326) - p.Match(SQLiteParserHAVING_) - } - { - p.SetState(1327) - - var _x = p.expr(0) - - localctx.(*Select_coreContext).havingExpr = _x - } - - } - - } - p.SetState(1346) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWINDOW_ { - { - p.SetState(1332) - p.Match(SQLiteParserWINDOW_) - } - { - p.SetState(1333) - p.Window_name() - } - { - p.SetState(1334) - p.Match(SQLiteParserAS_) - } - { - p.SetState(1335) - p.Window_defn() - } - p.SetState(1343) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1336) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1337) - p.Window_name() - } - { - p.SetState(1338) - p.Match(SQLiteParserAS_) - } - { - p.SetState(1339) - p.Window_defn() - } - - p.SetState(1345) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - } - - case SQLiteParserVALUES_: - p.EnterOuterAlt(localctx, 2) - { - p.SetState(1348) - p.Values_clause() - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - return localctx -} - -// IFactored_select_stmtContext is an interface to support dynamic dispatch. -type IFactored_select_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Select_stmt() ISelect_stmtContext - - // IsFactored_select_stmtContext differentiates from other interfaces. 
- IsFactored_select_stmtContext() -} - -type Factored_select_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyFactored_select_stmtContext() *Factored_select_stmtContext { - var p = new(Factored_select_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_factored_select_stmt - return p -} - -func (*Factored_select_stmtContext) IsFactored_select_stmtContext() {} - -func NewFactored_select_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Factored_select_stmtContext { - var p = new(Factored_select_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_factored_select_stmt - - return p -} - -func (s *Factored_select_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Factored_select_stmtContext) Select_stmt() ISelect_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISelect_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISelect_stmtContext) -} - -func (s *Factored_select_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Factored_select_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Factored_select_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterFactored_select_stmt(s) - } -} - -func (s *Factored_select_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitFactored_select_stmt(s) - } -} - -func (p *SQLiteParser) Factored_select_stmt() (localctx IFactored_select_stmtContext) { - this := p - _ = this - - localctx = NewFactored_select_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 92, SQLiteParserRULE_factored_select_stmt) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1351) - p.Select_stmt() - } - - return localctx -} - -// ISimple_select_stmtContext is an interface to support dynamic dispatch. -type ISimple_select_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Select_core() ISelect_coreContext - Common_table_stmt() ICommon_table_stmtContext - Order_by_stmt() IOrder_by_stmtContext - Limit_stmt() ILimit_stmtContext - - // IsSimple_select_stmtContext differentiates from other interfaces. 
- IsSimple_select_stmtContext() -} - -type Simple_select_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptySimple_select_stmtContext() *Simple_select_stmtContext { - var p = new(Simple_select_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_simple_select_stmt - return p -} - -func (*Simple_select_stmtContext) IsSimple_select_stmtContext() {} - -func NewSimple_select_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Simple_select_stmtContext { - var p = new(Simple_select_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_simple_select_stmt - - return p -} - -func (s *Simple_select_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Simple_select_stmtContext) Select_core() ISelect_coreContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISelect_coreContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISelect_coreContext) -} - -func (s *Simple_select_stmtContext) Common_table_stmt() ICommon_table_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICommon_table_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICommon_table_stmtContext) -} - -func (s *Simple_select_stmtContext) Order_by_stmt() IOrder_by_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IOrder_by_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IOrder_by_stmtContext) -} - -func (s *Simple_select_stmtContext) Limit_stmt() ILimit_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ILimit_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ILimit_stmtContext) -} - -func (s *Simple_select_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Simple_select_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Simple_select_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterSimple_select_stmt(s) - } -} - -func (s *Simple_select_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitSimple_select_stmt(s) - } -} - -func (p *SQLiteParser) Simple_select_stmt() (localctx ISimple_select_stmtContext) { - this := p - _ = this - - localctx = NewSimple_select_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 94, SQLiteParserRULE_simple_select_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(1354) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWITH_ { - { - p.SetState(1353) - p.Common_table_stmt() - } - - } - { - 
p.SetState(1356) - p.Select_core() - } - p.SetState(1358) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserORDER_ { - { - p.SetState(1357) - p.Order_by_stmt() - } - - } - p.SetState(1361) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserLIMIT_ { - { - p.SetState(1360) - p.Limit_stmt() - } - - } - - return localctx -} - -// ICompound_select_stmtContext is an interface to support dynamic dispatch. -type ICompound_select_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - AllSelect_core() []ISelect_coreContext - Select_core(i int) ISelect_coreContext - Common_table_stmt() ICommon_table_stmtContext - Order_by_stmt() IOrder_by_stmtContext - Limit_stmt() ILimit_stmtContext - AllUNION_() []antlr.TerminalNode - UNION_(i int) antlr.TerminalNode - AllINTERSECT_() []antlr.TerminalNode - INTERSECT_(i int) antlr.TerminalNode - AllEXCEPT_() []antlr.TerminalNode - EXCEPT_(i int) antlr.TerminalNode - AllALL_() []antlr.TerminalNode - ALL_(i int) antlr.TerminalNode - - // IsCompound_select_stmtContext differentiates from other interfaces. - IsCompound_select_stmtContext() -} - -type Compound_select_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyCompound_select_stmtContext() *Compound_select_stmtContext { - var p = new(Compound_select_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_compound_select_stmt - return p -} - -func (*Compound_select_stmtContext) IsCompound_select_stmtContext() {} - -func NewCompound_select_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Compound_select_stmtContext { - var p = new(Compound_select_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_compound_select_stmt - - return p -} - -func (s *Compound_select_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Compound_select_stmtContext) AllSelect_core() []ISelect_coreContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ISelect_coreContext); ok { - len++ - } - } - - tst := make([]ISelect_coreContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ISelect_coreContext); ok { - tst[i] = t.(ISelect_coreContext) - i++ - } - } - - return tst -} - -func (s *Compound_select_stmtContext) Select_core(i int) ISelect_coreContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISelect_coreContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ISelect_coreContext) -} - -func (s *Compound_select_stmtContext) Common_table_stmt() ICommon_table_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICommon_table_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICommon_table_stmtContext) -} - -func (s *Compound_select_stmtContext) Order_by_stmt() IOrder_by_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IOrder_by_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IOrder_by_stmtContext) -} - -func (s 
*Compound_select_stmtContext) Limit_stmt() ILimit_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ILimit_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ILimit_stmtContext) -} - -func (s *Compound_select_stmtContext) AllUNION_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserUNION_) -} - -func (s *Compound_select_stmtContext) UNION_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserUNION_, i) -} - -func (s *Compound_select_stmtContext) AllINTERSECT_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserINTERSECT_) -} - -func (s *Compound_select_stmtContext) INTERSECT_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserINTERSECT_, i) -} - -func (s *Compound_select_stmtContext) AllEXCEPT_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserEXCEPT_) -} - -func (s *Compound_select_stmtContext) EXCEPT_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserEXCEPT_, i) -} - -func (s *Compound_select_stmtContext) AllALL_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserALL_) -} - -func (s *Compound_select_stmtContext) ALL_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserALL_, i) -} - -func (s *Compound_select_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Compound_select_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Compound_select_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCompound_select_stmt(s) - } -} - -func (s *Compound_select_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitCompound_select_stmt(s) - } -} - -func (p *SQLiteParser) Compound_select_stmt() (localctx ICompound_select_stmtContext) { - this := p - _ = this - - localctx = NewCompound_select_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 96, SQLiteParserRULE_compound_select_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(1364) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWITH_ { - { - p.SetState(1363) - p.Common_table_stmt() - } - - } - { - p.SetState(1366) - p.Select_core() - } - p.SetState(1376) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for ok := true; ok; ok = _la == SQLiteParserEXCEPT_ || _la == SQLiteParserINTERSECT_ || _la == SQLiteParserUNION_ { - p.SetState(1373) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserUNION_: - { - p.SetState(1367) - p.Match(SQLiteParserUNION_) - } - p.SetState(1369) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserALL_ { - { - p.SetState(1368) - p.Match(SQLiteParserALL_) - } - - } - - case SQLiteParserINTERSECT_: - { - p.SetState(1371) - p.Match(SQLiteParserINTERSECT_) - } - - case SQLiteParserEXCEPT_: - { - p.SetState(1372) - p.Match(SQLiteParserEXCEPT_) - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } 
- { - p.SetState(1375) - p.Select_core() - } - - p.SetState(1378) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - p.SetState(1381) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserORDER_ { - { - p.SetState(1380) - p.Order_by_stmt() - } - - } - p.SetState(1384) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserLIMIT_ { - { - p.SetState(1383) - p.Limit_stmt() - } - - } - - return localctx -} - -// ITable_or_subqueryContext is an interface to support dynamic dispatch. -type ITable_or_subqueryContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Table_name() ITable_nameContext - Schema_name() ISchema_nameContext - DOT() antlr.TerminalNode - Table_alias() ITable_aliasContext - INDEXED_() antlr.TerminalNode - BY_() antlr.TerminalNode - Index_name() IIndex_nameContext - NOT_() antlr.TerminalNode - AS_() antlr.TerminalNode - Table_function_name() ITable_function_nameContext - OPEN_PAR() antlr.TerminalNode - AllExpr() []IExprContext - Expr(i int) IExprContext - CLOSE_PAR() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - AllTable_or_subquery() []ITable_or_subqueryContext - Table_or_subquery(i int) ITable_or_subqueryContext - Join_clause() IJoin_clauseContext - Select_stmt() ISelect_stmtContext - - // IsTable_or_subqueryContext differentiates from other interfaces. - IsTable_or_subqueryContext() -} - -type Table_or_subqueryContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyTable_or_subqueryContext() *Table_or_subqueryContext { - var p = new(Table_or_subqueryContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_table_or_subquery - return p -} - -func (*Table_or_subqueryContext) IsTable_or_subqueryContext() {} - -func NewTable_or_subqueryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_or_subqueryContext { - var p = new(Table_or_subqueryContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_table_or_subquery - - return p -} - -func (s *Table_or_subqueryContext) GetParser() antlr.Parser { return s.parser } - -func (s *Table_or_subqueryContext) Table_name() ITable_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *Table_or_subqueryContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Table_or_subqueryContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Table_or_subqueryContext) Table_alias() ITable_aliasContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_aliasContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_aliasContext) -} - -func (s *Table_or_subqueryContext) INDEXED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINDEXED_, 0) -} - -func (s *Table_or_subqueryContext) 
BY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserBY_, 0) -} - -func (s *Table_or_subqueryContext) Index_name() IIndex_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IIndex_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IIndex_nameContext) -} - -func (s *Table_or_subqueryContext) NOT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNOT_, 0) -} - -func (s *Table_or_subqueryContext) AS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, 0) -} - -func (s *Table_or_subqueryContext) Table_function_name() ITable_function_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_function_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_function_nameContext) -} - -func (s *Table_or_subqueryContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Table_or_subqueryContext) AllExpr() []IExprContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IExprContext); ok { - len++ - } - } - - tst := make([]IExprContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IExprContext); ok { - tst[i] = t.(IExprContext) - i++ - } - } - - return tst -} - -func (s *Table_or_subqueryContext) Expr(i int) IExprContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Table_or_subqueryContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Table_or_subqueryContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Table_or_subqueryContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Table_or_subqueryContext) AllTable_or_subquery() []ITable_or_subqueryContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ITable_or_subqueryContext); ok { - len++ - } - } - - tst := make([]ITable_or_subqueryContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ITable_or_subqueryContext); ok { - tst[i] = t.(ITable_or_subqueryContext) - i++ - } - } - - return tst -} - -func (s *Table_or_subqueryContext) Table_or_subquery(i int) ITable_or_subqueryContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_or_subqueryContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ITable_or_subqueryContext) -} - -func (s *Table_or_subqueryContext) Join_clause() IJoin_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IJoin_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IJoin_clauseContext) -} - -func (s *Table_or_subqueryContext) Select_stmt() ISelect_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISelect_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISelect_stmtContext) -} - -func (s *Table_or_subqueryContext) 
GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Table_or_subqueryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Table_or_subqueryContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterTable_or_subquery(s) - } -} - -func (s *Table_or_subqueryContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitTable_or_subquery(s) - } -} - -func (p *SQLiteParser) Table_or_subquery() (localctx ITable_or_subqueryContext) { - this := p - _ = this - - localctx = NewTable_or_subqueryContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 98, SQLiteParserRULE_table_or_subquery) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(1450) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 205, p.GetParserRuleContext()) { - case 1: - p.EnterOuterAlt(localctx, 1) - p.SetState(1389) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 193, p.GetParserRuleContext()) == 1 { - { - p.SetState(1386) - p.Schema_name() - } - { - p.SetState(1387) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(1391) - p.Table_name() - } - p.SetState(1396) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 195, p.GetParserRuleContext()) == 1 { - p.SetState(1393) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 194, p.GetParserRuleContext()) == 1 { - { - p.SetState(1392) - p.Match(SQLiteParserAS_) - } - - } - { - p.SetState(1395) - p.Table_alias() - } - - } - p.SetState(1403) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserINDEXED_: - { - p.SetState(1398) - p.Match(SQLiteParserINDEXED_) - } - { - p.SetState(1399) - p.Match(SQLiteParserBY_) - } - { - p.SetState(1400) - p.Index_name() - } - - case SQLiteParserNOT_: - { - p.SetState(1401) - p.Match(SQLiteParserNOT_) - } - { - p.SetState(1402) - p.Match(SQLiteParserINDEXED_) - } - - case SQLiteParserEOF, SQLiteParserSCOL, SQLiteParserCLOSE_PAR, SQLiteParserCOMMA, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserATTACH_, SQLiteParserBEGIN_, SQLiteParserCOMMIT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserDELETE_, SQLiteParserDETACH_, SQLiteParserDROP_, SQLiteParserEND_, SQLiteParserEXCEPT_, SQLiteParserEXPLAIN_, SQLiteParserGROUP_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINTERSECT_, SQLiteParserJOIN_, SQLiteParserLEFT_, SQLiteParserLIMIT_, SQLiteParserNATURAL_, SQLiteParserON_, SQLiteParserORDER_, SQLiteParserPRAGMA_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserREPLACE_, SQLiteParserRETURNING_, SQLiteParserROLLBACK_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserUNION_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWINDOW_: - - default: - } - - case 2: - p.EnterOuterAlt(localctx, 2) - p.SetState(1408) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 197, 
p.GetParserRuleContext()) == 1 { - { - p.SetState(1405) - p.Schema_name() - } - { - p.SetState(1406) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(1410) - p.Table_function_name() - } - { - p.SetState(1411) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1412) - p.expr(0) - } - p.SetState(1417) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1413) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1414) - p.expr(0) - } - - p.SetState(1419) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(1420) - p.Match(SQLiteParserCLOSE_PAR) - } - p.SetState(1425) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 200, p.GetParserRuleContext()) == 1 { - p.SetState(1422) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 199, p.GetParserRuleContext()) == 1 { - { - p.SetState(1421) - p.Match(SQLiteParserAS_) - } - - } - { - p.SetState(1424) - p.Table_alias() - } - - } - - case 3: - p.EnterOuterAlt(localctx, 3) - { - p.SetState(1427) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1437) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 202, p.GetParserRuleContext()) { - case 1: - { - p.SetState(1428) - p.Table_or_subquery() - } - p.SetState(1433) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1429) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1430) - p.Table_or_subquery() - } - - p.SetState(1435) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - case 2: - { - p.SetState(1436) - p.Join_clause() - } - - } - { - p.SetState(1439) - p.Match(SQLiteParserCLOSE_PAR) - } - - case 4: - p.EnterOuterAlt(localctx, 4) - { - p.SetState(1441) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1442) - p.Select_stmt() - } - { - p.SetState(1443) - p.Match(SQLiteParserCLOSE_PAR) - } - p.SetState(1448) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 204, p.GetParserRuleContext()) == 1 { - p.SetState(1445) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 203, p.GetParserRuleContext()) == 1 { - { - p.SetState(1444) - p.Match(SQLiteParserAS_) - } - - } - { - p.SetState(1447) - p.Table_alias() - } - - } - - } - - return localctx -} - -// IResult_columnContext is an interface to support dynamic dispatch. -type IResult_columnContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - STAR() antlr.TerminalNode - Table_name() ITable_nameContext - DOT() antlr.TerminalNode - Expr() IExprContext - Column_alias() IColumn_aliasContext - AS_() antlr.TerminalNode - - // IsResult_columnContext differentiates from other interfaces. 
- IsResult_columnContext() -} - -type Result_columnContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyResult_columnContext() *Result_columnContext { - var p = new(Result_columnContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_result_column - return p -} - -func (*Result_columnContext) IsResult_columnContext() {} - -func NewResult_columnContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Result_columnContext { - var p = new(Result_columnContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_result_column - - return p -} - -func (s *Result_columnContext) GetParser() antlr.Parser { return s.parser } - -func (s *Result_columnContext) STAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserSTAR, 0) -} - -func (s *Result_columnContext) Table_name() ITable_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *Result_columnContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Result_columnContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Result_columnContext) Column_alias() IColumn_aliasContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_aliasContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IColumn_aliasContext) -} - -func (s *Result_columnContext) AS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, 0) -} - -func (s *Result_columnContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Result_columnContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Result_columnContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterResult_column(s) - } -} - -func (s *Result_columnContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitResult_column(s) - } -} - -func (p *SQLiteParser) Result_column() (localctx IResult_columnContext) { - this := p - _ = this - - localctx = NewResult_columnContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 100, SQLiteParserRULE_result_column) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(1464) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 208, p.GetParserRuleContext()) { - case 1: - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1452) - p.Match(SQLiteParserSTAR) - } - - case 2: - p.EnterOuterAlt(localctx, 2) - { - p.SetState(1453) - p.Table_name() - } - { - p.SetState(1454) - p.Match(SQLiteParserDOT) - } - { - 
p.SetState(1455) - p.Match(SQLiteParserSTAR) - } - - case 3: - p.EnterOuterAlt(localctx, 3) - { - p.SetState(1457) - p.expr(0) - } - p.SetState(1462) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserAS_ || _la == SQLiteParserIDENTIFIER || _la == SQLiteParserSTRING_LITERAL { - p.SetState(1459) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserAS_ { - { - p.SetState(1458) - p.Match(SQLiteParserAS_) - } - - } - { - p.SetState(1461) - p.Column_alias() - } - - } - - } - - return localctx -} - -// IJoin_operatorContext is an interface to support dynamic dispatch. -type IJoin_operatorContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - COMMA() antlr.TerminalNode - JOIN_() antlr.TerminalNode - NATURAL_() antlr.TerminalNode - LEFT_() antlr.TerminalNode - INNER_() antlr.TerminalNode - CROSS_() antlr.TerminalNode - OUTER_() antlr.TerminalNode - - // IsJoin_operatorContext differentiates from other interfaces. - IsJoin_operatorContext() -} - -type Join_operatorContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyJoin_operatorContext() *Join_operatorContext { - var p = new(Join_operatorContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_join_operator - return p -} - -func (*Join_operatorContext) IsJoin_operatorContext() {} - -func NewJoin_operatorContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Join_operatorContext { - var p = new(Join_operatorContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_join_operator - - return p -} - -func (s *Join_operatorContext) GetParser() antlr.Parser { return s.parser } - -func (s *Join_operatorContext) COMMA() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, 0) -} - -func (s *Join_operatorContext) JOIN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserJOIN_, 0) -} - -func (s *Join_operatorContext) NATURAL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNATURAL_, 0) -} - -func (s *Join_operatorContext) LEFT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLEFT_, 0) -} - -func (s *Join_operatorContext) INNER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINNER_, 0) -} - -func (s *Join_operatorContext) CROSS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCROSS_, 0) -} - -func (s *Join_operatorContext) OUTER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOUTER_, 0) -} - -func (s *Join_operatorContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Join_operatorContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Join_operatorContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterJoin_operator(s) - } -} - -func (s *Join_operatorContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitJoin_operator(s) - } -} - -func (p *SQLiteParser) Join_operator() (localctx IJoin_operatorContext) { - this := p - _ = this - - localctx = NewJoin_operatorContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 102, SQLiteParserRULE_join_operator) - var _la int - - defer func() { - 
p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(1479) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserCOMMA: - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1466) - p.Match(SQLiteParserCOMMA) - } - - case SQLiteParserCROSS_, SQLiteParserINNER_, SQLiteParserJOIN_, SQLiteParserLEFT_, SQLiteParserNATURAL_: - p.EnterOuterAlt(localctx, 2) - p.SetState(1468) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserNATURAL_ { - { - p.SetState(1467) - p.Match(SQLiteParserNATURAL_) - } - - } - p.SetState(1476) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserLEFT_: - { - p.SetState(1470) - p.Match(SQLiteParserLEFT_) - } - p.SetState(1472) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserOUTER_ { - { - p.SetState(1471) - p.Match(SQLiteParserOUTER_) - } - - } - - case SQLiteParserINNER_: - { - p.SetState(1474) - p.Match(SQLiteParserINNER_) - } - - case SQLiteParserCROSS_: - { - p.SetState(1475) - p.Match(SQLiteParserCROSS_) - } - - case SQLiteParserJOIN_: - - default: - } - { - p.SetState(1478) - p.Match(SQLiteParserJOIN_) - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - return localctx -} - -// IJoin_constraintContext is an interface to support dynamic dispatch. -type IJoin_constraintContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - ON_() antlr.TerminalNode - Expr() IExprContext - USING_() antlr.TerminalNode - OPEN_PAR() antlr.TerminalNode - AllColumn_name() []IColumn_nameContext - Column_name(i int) IColumn_nameContext - CLOSE_PAR() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsJoin_constraintContext differentiates from other interfaces. 
- IsJoin_constraintContext() -} - -type Join_constraintContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyJoin_constraintContext() *Join_constraintContext { - var p = new(Join_constraintContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_join_constraint - return p -} - -func (*Join_constraintContext) IsJoin_constraintContext() {} - -func NewJoin_constraintContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Join_constraintContext { - var p = new(Join_constraintContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_join_constraint - - return p -} - -func (s *Join_constraintContext) GetParser() antlr.Parser { return s.parser } - -func (s *Join_constraintContext) ON_() antlr.TerminalNode { - return s.GetToken(SQLiteParserON_, 0) -} - -func (s *Join_constraintContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Join_constraintContext) USING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUSING_, 0) -} - -func (s *Join_constraintContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Join_constraintContext) AllColumn_name() []IColumn_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_nameContext); ok { - len++ - } - } - - tst := make([]IColumn_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_nameContext); ok { - tst[i] = t.(IColumn_nameContext) - i++ - } - } - - return tst -} - -func (s *Join_constraintContext) Column_name(i int) IColumn_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Join_constraintContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Join_constraintContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Join_constraintContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Join_constraintContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Join_constraintContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Join_constraintContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterJoin_constraint(s) - } -} - -func (s *Join_constraintContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitJoin_constraint(s) - } -} - -func (p *SQLiteParser) Join_constraint() (localctx IJoin_constraintContext) { - this := p - _ = this - - localctx = NewJoin_constraintContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 104, SQLiteParserRULE_join_constraint) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := 
err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(1495) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserON_: - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1481) - p.Match(SQLiteParserON_) - } - { - p.SetState(1482) - p.expr(0) - } - - case SQLiteParserUSING_: - p.EnterOuterAlt(localctx, 2) - { - p.SetState(1483) - p.Match(SQLiteParserUSING_) - } - { - p.SetState(1484) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1485) - p.Column_name() - } - p.SetState(1490) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1486) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1487) - p.Column_name() - } - - p.SetState(1492) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(1493) - p.Match(SQLiteParserCLOSE_PAR) - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - return localctx -} - -// ICompound_operatorContext is an interface to support dynamic dispatch. -type ICompound_operatorContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - UNION_() antlr.TerminalNode - ALL_() antlr.TerminalNode - INTERSECT_() antlr.TerminalNode - EXCEPT_() antlr.TerminalNode - - // IsCompound_operatorContext differentiates from other interfaces. - IsCompound_operatorContext() -} - -type Compound_operatorContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyCompound_operatorContext() *Compound_operatorContext { - var p = new(Compound_operatorContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_compound_operator - return p -} - -func (*Compound_operatorContext) IsCompound_operatorContext() {} - -func NewCompound_operatorContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Compound_operatorContext { - var p = new(Compound_operatorContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_compound_operator - - return p -} - -func (s *Compound_operatorContext) GetParser() antlr.Parser { return s.parser } - -func (s *Compound_operatorContext) UNION_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUNION_, 0) -} - -func (s *Compound_operatorContext) ALL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserALL_, 0) -} - -func (s *Compound_operatorContext) INTERSECT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINTERSECT_, 0) -} - -func (s *Compound_operatorContext) EXCEPT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXCEPT_, 0) -} - -func (s *Compound_operatorContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Compound_operatorContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Compound_operatorContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCompound_operator(s) - } -} - -func (s *Compound_operatorContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitCompound_operator(s) - } -} - -func (p *SQLiteParser) Compound_operator() 
(localctx ICompound_operatorContext) { - this := p - _ = this - - localctx = NewCompound_operatorContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 106, SQLiteParserRULE_compound_operator) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(1503) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserUNION_: - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1497) - p.Match(SQLiteParserUNION_) - } - p.SetState(1499) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserALL_ { - { - p.SetState(1498) - p.Match(SQLiteParserALL_) - } - - } - - case SQLiteParserINTERSECT_: - p.EnterOuterAlt(localctx, 2) - { - p.SetState(1501) - p.Match(SQLiteParserINTERSECT_) - } - - case SQLiteParserEXCEPT_: - p.EnterOuterAlt(localctx, 3) - { - p.SetState(1502) - p.Match(SQLiteParserEXCEPT_) - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - return localctx -} - -// IUpdate_stmtContext is an interface to support dynamic dispatch. -type IUpdate_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - UPDATE_() antlr.TerminalNode - Qualified_table_name() IQualified_table_nameContext - SET_() antlr.TerminalNode - AllASSIGN() []antlr.TerminalNode - ASSIGN(i int) antlr.TerminalNode - AllExpr() []IExprContext - Expr(i int) IExprContext - AllColumn_name() []IColumn_nameContext - Column_name(i int) IColumn_nameContext - AllColumn_name_list() []IColumn_name_listContext - Column_name_list(i int) IColumn_name_listContext - With_clause() IWith_clauseContext - OR_() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - FROM_() antlr.TerminalNode - WHERE_() antlr.TerminalNode - Returning_clause() IReturning_clauseContext - ROLLBACK_() antlr.TerminalNode - ABORT_() antlr.TerminalNode - REPLACE_() antlr.TerminalNode - FAIL_() antlr.TerminalNode - IGNORE_() antlr.TerminalNode - AllTable_or_subquery() []ITable_or_subqueryContext - Table_or_subquery(i int) ITable_or_subqueryContext - Join_clause() IJoin_clauseContext - - // IsUpdate_stmtContext differentiates from other interfaces. 
- IsUpdate_stmtContext() -} - -type Update_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyUpdate_stmtContext() *Update_stmtContext { - var p = new(Update_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_update_stmt - return p -} - -func (*Update_stmtContext) IsUpdate_stmtContext() {} - -func NewUpdate_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Update_stmtContext { - var p = new(Update_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_update_stmt - - return p -} - -func (s *Update_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Update_stmtContext) UPDATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUPDATE_, 0) -} - -func (s *Update_stmtContext) Qualified_table_name() IQualified_table_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IQualified_table_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IQualified_table_nameContext) -} - -func (s *Update_stmtContext) SET_() antlr.TerminalNode { - return s.GetToken(SQLiteParserSET_, 0) -} - -func (s *Update_stmtContext) AllASSIGN() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserASSIGN) -} - -func (s *Update_stmtContext) ASSIGN(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserASSIGN, i) -} - -func (s *Update_stmtContext) AllExpr() []IExprContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IExprContext); ok { - len++ - } - } - - tst := make([]IExprContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IExprContext); ok { - tst[i] = t.(IExprContext) - i++ - } - } - - return tst -} - -func (s *Update_stmtContext) Expr(i int) IExprContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Update_stmtContext) AllColumn_name() []IColumn_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_nameContext); ok { - len++ - } - } - - tst := make([]IColumn_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_nameContext); ok { - tst[i] = t.(IColumn_nameContext) - i++ - } - } - - return tst -} - -func (s *Update_stmtContext) Column_name(i int) IColumn_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Update_stmtContext) AllColumn_name_list() []IColumn_name_listContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_name_listContext); ok { - len++ - } - } - - tst := make([]IColumn_name_listContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_name_listContext); ok { - tst[i] = t.(IColumn_name_listContext) - i++ - } - } - - return tst -} - -func (s *Update_stmtContext) Column_name_list(i int) IColumn_name_listContext { - var t antlr.RuleContext - j := 0 - for 
_, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_name_listContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_name_listContext) -} - -func (s *Update_stmtContext) With_clause() IWith_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IWith_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IWith_clauseContext) -} - -func (s *Update_stmtContext) OR_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOR_, 0) -} - -func (s *Update_stmtContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Update_stmtContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Update_stmtContext) FROM_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFROM_, 0) -} - -func (s *Update_stmtContext) WHERE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWHERE_, 0) -} - -func (s *Update_stmtContext) Returning_clause() IReturning_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IReturning_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IReturning_clauseContext) -} - -func (s *Update_stmtContext) ROLLBACK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROLLBACK_, 0) -} - -func (s *Update_stmtContext) ABORT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserABORT_, 0) -} - -func (s *Update_stmtContext) REPLACE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserREPLACE_, 0) -} - -func (s *Update_stmtContext) FAIL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFAIL_, 0) -} - -func (s *Update_stmtContext) IGNORE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIGNORE_, 0) -} - -func (s *Update_stmtContext) AllTable_or_subquery() []ITable_or_subqueryContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ITable_or_subqueryContext); ok { - len++ - } - } - - tst := make([]ITable_or_subqueryContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ITable_or_subqueryContext); ok { - tst[i] = t.(ITable_or_subqueryContext) - i++ - } - } - - return tst -} - -func (s *Update_stmtContext) Table_or_subquery(i int) ITable_or_subqueryContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_or_subqueryContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ITable_or_subqueryContext) -} - -func (s *Update_stmtContext) Join_clause() IJoin_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IJoin_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IJoin_clauseContext) -} - -func (s *Update_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Update_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Update_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterUpdate_stmt(s) - } -} - -func (s *Update_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := 
listener.(SQLiteParserListener); ok { - listenerT.ExitUpdate_stmt(s) - } -} - -func (p *SQLiteParser) Update_stmt() (localctx IUpdate_stmtContext) { - this := p - _ = this - - localctx = NewUpdate_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 108, SQLiteParserRULE_update_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(1506) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWITH_ { - { - p.SetState(1505) - p.With_clause() - } - - } - { - p.SetState(1508) - p.Match(SQLiteParserUPDATE_) - } - p.SetState(1511) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 218, p.GetParserRuleContext()) == 1 { - { - p.SetState(1509) - p.Match(SQLiteParserOR_) - } - { - p.SetState(1510) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserABORT_ || ((int64((_la-72)) & ^0x3f) == 0 && ((int64(1)<<(_la-72))&19140298416325121) != 0)) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - } - { - p.SetState(1513) - p.Qualified_table_name() - } - { - p.SetState(1514) - p.Match(SQLiteParserSET_) - } - p.SetState(1517) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 219, p.GetParserRuleContext()) { - case 1: - { - p.SetState(1515) - p.Column_name() - } - - case 2: - { - p.SetState(1516) - p.Column_name_list() - } - - } - { - p.SetState(1519) - p.Match(SQLiteParserASSIGN) - } - { - p.SetState(1520) - p.expr(0) - } - p.SetState(1531) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1521) - p.Match(SQLiteParserCOMMA) - } - p.SetState(1524) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 220, p.GetParserRuleContext()) { - case 1: - { - p.SetState(1522) - p.Column_name() - } - - case 2: - { - p.SetState(1523) - p.Column_name_list() - } - - } - { - p.SetState(1526) - p.Match(SQLiteParserASSIGN) - } - { - p.SetState(1527) - p.expr(0) - } - - p.SetState(1533) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - p.SetState(1546) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserFROM_ { - { - p.SetState(1534) - p.Match(SQLiteParserFROM_) - } - p.SetState(1544) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 223, p.GetParserRuleContext()) { - case 1: - { - p.SetState(1535) - p.Table_or_subquery() - } - p.SetState(1540) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1536) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1537) - p.Table_or_subquery() - } - - p.SetState(1542) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - case 2: - { - p.SetState(1543) - p.Join_clause() - } - - } - - } - p.SetState(1550) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWHERE_ { - { - p.SetState(1548) - p.Match(SQLiteParserWHERE_) - } - { - p.SetState(1549) - p.expr(0) - } - - } - p.SetState(1553) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == 
SQLiteParserRETURNING_ { - { - p.SetState(1552) - p.Returning_clause() - } - - } - - return localctx -} - -// IColumn_name_listContext is an interface to support dynamic dispatch. -type IColumn_name_listContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - OPEN_PAR() antlr.TerminalNode - AllColumn_name() []IColumn_nameContext - Column_name(i int) IColumn_nameContext - CLOSE_PAR() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsColumn_name_listContext differentiates from other interfaces. - IsColumn_name_listContext() -} - -type Column_name_listContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyColumn_name_listContext() *Column_name_listContext { - var p = new(Column_name_listContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_column_name_list - return p -} - -func (*Column_name_listContext) IsColumn_name_listContext() {} - -func NewColumn_name_listContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Column_name_listContext { - var p = new(Column_name_listContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_column_name_list - - return p -} - -func (s *Column_name_listContext) GetParser() antlr.Parser { return s.parser } - -func (s *Column_name_listContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Column_name_listContext) AllColumn_name() []IColumn_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_nameContext); ok { - len++ - } - } - - tst := make([]IColumn_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_nameContext); ok { - tst[i] = t.(IColumn_nameContext) - i++ - } - } - - return tst -} - -func (s *Column_name_listContext) Column_name(i int) IColumn_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Column_name_listContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Column_name_listContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Column_name_listContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Column_name_listContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Column_name_listContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Column_name_listContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterColumn_name_list(s) - } -} - -func (s *Column_name_listContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitColumn_name_list(s) - } -} - -func (p *SQLiteParser) Column_name_list() (localctx IColumn_name_listContext) { - this := p - _ = this - - localctx = NewColumn_name_listContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 110, 
SQLiteParserRULE_column_name_list) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1555) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1556) - p.Column_name() - } - p.SetState(1561) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1557) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1558) - p.Column_name() - } - - p.SetState(1563) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - { - p.SetState(1564) - p.Match(SQLiteParserCLOSE_PAR) - } - - return localctx -} - -// IUpdate_stmt_limitedContext is an interface to support dynamic dispatch. -type IUpdate_stmt_limitedContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - UPDATE_() antlr.TerminalNode - Qualified_table_name() IQualified_table_nameContext - SET_() antlr.TerminalNode - AllASSIGN() []antlr.TerminalNode - ASSIGN(i int) antlr.TerminalNode - AllExpr() []IExprContext - Expr(i int) IExprContext - AllColumn_name() []IColumn_nameContext - Column_name(i int) IColumn_nameContext - AllColumn_name_list() []IColumn_name_listContext - Column_name_list(i int) IColumn_name_listContext - With_clause() IWith_clauseContext - OR_() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - WHERE_() antlr.TerminalNode - Returning_clause() IReturning_clauseContext - Limit_stmt() ILimit_stmtContext - ROLLBACK_() antlr.TerminalNode - ABORT_() antlr.TerminalNode - REPLACE_() antlr.TerminalNode - FAIL_() antlr.TerminalNode - IGNORE_() antlr.TerminalNode - Order_by_stmt() IOrder_by_stmtContext - - // IsUpdate_stmt_limitedContext differentiates from other interfaces. 
- IsUpdate_stmt_limitedContext() -} - -type Update_stmt_limitedContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyUpdate_stmt_limitedContext() *Update_stmt_limitedContext { - var p = new(Update_stmt_limitedContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_update_stmt_limited - return p -} - -func (*Update_stmt_limitedContext) IsUpdate_stmt_limitedContext() {} - -func NewUpdate_stmt_limitedContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Update_stmt_limitedContext { - var p = new(Update_stmt_limitedContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_update_stmt_limited - - return p -} - -func (s *Update_stmt_limitedContext) GetParser() antlr.Parser { return s.parser } - -func (s *Update_stmt_limitedContext) UPDATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUPDATE_, 0) -} - -func (s *Update_stmt_limitedContext) Qualified_table_name() IQualified_table_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IQualified_table_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IQualified_table_nameContext) -} - -func (s *Update_stmt_limitedContext) SET_() antlr.TerminalNode { - return s.GetToken(SQLiteParserSET_, 0) -} - -func (s *Update_stmt_limitedContext) AllASSIGN() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserASSIGN) -} - -func (s *Update_stmt_limitedContext) ASSIGN(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserASSIGN, i) -} - -func (s *Update_stmt_limitedContext) AllExpr() []IExprContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IExprContext); ok { - len++ - } - } - - tst := make([]IExprContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IExprContext); ok { - tst[i] = t.(IExprContext) - i++ - } - } - - return tst -} - -func (s *Update_stmt_limitedContext) Expr(i int) IExprContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Update_stmt_limitedContext) AllColumn_name() []IColumn_nameContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_nameContext); ok { - len++ - } - } - - tst := make([]IColumn_nameContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_nameContext); ok { - tst[i] = t.(IColumn_nameContext) - i++ - } - } - - return tst -} - -func (s *Update_stmt_limitedContext) Column_name(i int) IColumn_nameContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_nameContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_nameContext) -} - -func (s *Update_stmt_limitedContext) AllColumn_name_list() []IColumn_name_listContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IColumn_name_listContext); ok { - len++ - } - } - - tst := make([]IColumn_name_listContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IColumn_name_listContext); ok { - tst[i] = 
t.(IColumn_name_listContext) - i++ - } - } - - return tst -} - -func (s *Update_stmt_limitedContext) Column_name_list(i int) IColumn_name_listContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IColumn_name_listContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IColumn_name_listContext) -} - -func (s *Update_stmt_limitedContext) With_clause() IWith_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IWith_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IWith_clauseContext) -} - -func (s *Update_stmt_limitedContext) OR_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOR_, 0) -} - -func (s *Update_stmt_limitedContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Update_stmt_limitedContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Update_stmt_limitedContext) WHERE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWHERE_, 0) -} - -func (s *Update_stmt_limitedContext) Returning_clause() IReturning_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IReturning_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IReturning_clauseContext) -} - -func (s *Update_stmt_limitedContext) Limit_stmt() ILimit_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ILimit_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ILimit_stmtContext) -} - -func (s *Update_stmt_limitedContext) ROLLBACK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROLLBACK_, 0) -} - -func (s *Update_stmt_limitedContext) ABORT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserABORT_, 0) -} - -func (s *Update_stmt_limitedContext) REPLACE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserREPLACE_, 0) -} - -func (s *Update_stmt_limitedContext) FAIL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFAIL_, 0) -} - -func (s *Update_stmt_limitedContext) IGNORE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIGNORE_, 0) -} - -func (s *Update_stmt_limitedContext) Order_by_stmt() IOrder_by_stmtContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IOrder_by_stmtContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IOrder_by_stmtContext) -} - -func (s *Update_stmt_limitedContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Update_stmt_limitedContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Update_stmt_limitedContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterUpdate_stmt_limited(s) - } -} - -func (s *Update_stmt_limitedContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitUpdate_stmt_limited(s) - } -} - -func (p *SQLiteParser) Update_stmt_limited() (localctx IUpdate_stmt_limitedContext) { - this := p - _ = this - - localctx = NewUpdate_stmt_limitedContext(p, p.GetParserRuleContext(), 
p.GetState()) - p.EnterRule(localctx, 112, SQLiteParserRULE_update_stmt_limited) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(1567) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWITH_ { - { - p.SetState(1566) - p.With_clause() - } - - } - { - p.SetState(1569) - p.Match(SQLiteParserUPDATE_) - } - p.SetState(1572) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 229, p.GetParserRuleContext()) == 1 { - { - p.SetState(1570) - p.Match(SQLiteParserOR_) - } - { - p.SetState(1571) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserABORT_ || ((int64((_la-72)) & ^0x3f) == 0 && ((int64(1)<<(_la-72))&19140298416325121) != 0)) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - } - { - p.SetState(1574) - p.Qualified_table_name() - } - { - p.SetState(1575) - p.Match(SQLiteParserSET_) - } - p.SetState(1578) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 230, p.GetParserRuleContext()) { - case 1: - { - p.SetState(1576) - p.Column_name() - } - - case 2: - { - p.SetState(1577) - p.Column_name_list() - } - - } - { - p.SetState(1580) - p.Match(SQLiteParserASSIGN) - } - { - p.SetState(1581) - p.expr(0) - } - p.SetState(1592) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1582) - p.Match(SQLiteParserCOMMA) - } - p.SetState(1585) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 231, p.GetParserRuleContext()) { - case 1: - { - p.SetState(1583) - p.Column_name() - } - - case 2: - { - p.SetState(1584) - p.Column_name_list() - } - - } - { - p.SetState(1587) - p.Match(SQLiteParserASSIGN) - } - { - p.SetState(1588) - p.expr(0) - } - - p.SetState(1594) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - p.SetState(1597) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserWHERE_ { - { - p.SetState(1595) - p.Match(SQLiteParserWHERE_) - } - { - p.SetState(1596) - p.expr(0) - } - - } - p.SetState(1600) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserRETURNING_ { - { - p.SetState(1599) - p.Returning_clause() - } - - } - p.SetState(1606) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserLIMIT_ || _la == SQLiteParserORDER_ { - p.SetState(1603) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserORDER_ { - { - p.SetState(1602) - p.Order_by_stmt() - } - - } - { - p.SetState(1605) - p.Limit_stmt() - } - - } - - return localctx -} - -// IQualified_table_nameContext is an interface to support dynamic dispatch. -type IQualified_table_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
- GetParser() antlr.Parser - - // Getter signatures - Table_name() ITable_nameContext - Schema_name() ISchema_nameContext - DOT() antlr.TerminalNode - AS_() antlr.TerminalNode - Alias() IAliasContext - INDEXED_() antlr.TerminalNode - BY_() antlr.TerminalNode - Index_name() IIndex_nameContext - NOT_() antlr.TerminalNode - - // IsQualified_table_nameContext differentiates from other interfaces. - IsQualified_table_nameContext() -} - -type Qualified_table_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyQualified_table_nameContext() *Qualified_table_nameContext { - var p = new(Qualified_table_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_qualified_table_name - return p -} - -func (*Qualified_table_nameContext) IsQualified_table_nameContext() {} - -func NewQualified_table_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Qualified_table_nameContext { - var p = new(Qualified_table_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_qualified_table_name - - return p -} - -func (s *Qualified_table_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Qualified_table_nameContext) Table_name() ITable_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ITable_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ITable_nameContext) -} - -func (s *Qualified_table_nameContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Qualified_table_nameContext) DOT() antlr.TerminalNode { - return s.GetToken(SQLiteParserDOT, 0) -} - -func (s *Qualified_table_nameContext) AS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, 0) -} - -func (s *Qualified_table_nameContext) Alias() IAliasContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAliasContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAliasContext) -} - -func (s *Qualified_table_nameContext) INDEXED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINDEXED_, 0) -} - -func (s *Qualified_table_nameContext) BY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserBY_, 0) -} - -func (s *Qualified_table_nameContext) Index_name() IIndex_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IIndex_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IIndex_nameContext) -} - -func (s *Qualified_table_nameContext) NOT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNOT_, 0) -} - -func (s *Qualified_table_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Qualified_table_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Qualified_table_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterQualified_table_name(s) - } -} - -func (s 
*Qualified_table_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitQualified_table_name(s) - } -} - -func (p *SQLiteParser) Qualified_table_name() (localctx IQualified_table_nameContext) { - this := p - _ = this - - localctx = NewQualified_table_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 114, SQLiteParserRULE_qualified_table_name) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - p.SetState(1611) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 237, p.GetParserRuleContext()) == 1 { - { - p.SetState(1608) - p.Schema_name() - } - { - p.SetState(1609) - p.Match(SQLiteParserDOT) - } - - } - { - p.SetState(1613) - p.Table_name() - } - p.SetState(1616) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserAS_ { - { - p.SetState(1614) - p.Match(SQLiteParserAS_) - } - { - p.SetState(1615) - p.Alias() - } - - } - p.SetState(1623) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserINDEXED_: - { - p.SetState(1618) - p.Match(SQLiteParserINDEXED_) - } - { - p.SetState(1619) - p.Match(SQLiteParserBY_) - } - { - p.SetState(1620) - p.Index_name() - } - - case SQLiteParserNOT_: - { - p.SetState(1621) - p.Match(SQLiteParserNOT_) - } - { - p.SetState(1622) - p.Match(SQLiteParserINDEXED_) - } - - case SQLiteParserEOF, SQLiteParserSCOL, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserATTACH_, SQLiteParserBEGIN_, SQLiteParserCOMMIT_, SQLiteParserCREATE_, SQLiteParserDELETE_, SQLiteParserDETACH_, SQLiteParserDROP_, SQLiteParserEND_, SQLiteParserEXPLAIN_, SQLiteParserINSERT_, SQLiteParserLIMIT_, SQLiteParserORDER_, SQLiteParserPRAGMA_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserREPLACE_, SQLiteParserRETURNING_, SQLiteParserROLLBACK_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserUPDATE_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserWHERE_, SQLiteParserWITH_: - - default: - } - - return localctx -} - -// IVacuum_stmtContext is an interface to support dynamic dispatch. -type IVacuum_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - VACUUM_() antlr.TerminalNode - Schema_name() ISchema_nameContext - INTO_() antlr.TerminalNode - Filename() IFilenameContext - - // IsVacuum_stmtContext differentiates from other interfaces. 
- IsVacuum_stmtContext() -} - -type Vacuum_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyVacuum_stmtContext() *Vacuum_stmtContext { - var p = new(Vacuum_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_vacuum_stmt - return p -} - -func (*Vacuum_stmtContext) IsVacuum_stmtContext() {} - -func NewVacuum_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Vacuum_stmtContext { - var p = new(Vacuum_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_vacuum_stmt - - return p -} - -func (s *Vacuum_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Vacuum_stmtContext) VACUUM_() antlr.TerminalNode { - return s.GetToken(SQLiteParserVACUUM_, 0) -} - -func (s *Vacuum_stmtContext) Schema_name() ISchema_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISchema_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISchema_nameContext) -} - -func (s *Vacuum_stmtContext) INTO_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINTO_, 0) -} - -func (s *Vacuum_stmtContext) Filename() IFilenameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IFilenameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IFilenameContext) -} - -func (s *Vacuum_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Vacuum_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Vacuum_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterVacuum_stmt(s) - } -} - -func (s *Vacuum_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitVacuum_stmt(s) - } -} - -func (p *SQLiteParser) Vacuum_stmt() (localctx IVacuum_stmtContext) { - this := p - _ = this - - localctx = NewVacuum_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 116, SQLiteParserRULE_vacuum_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1625) - p.Match(SQLiteParserVACUUM_) - } - p.SetState(1627) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 240, p.GetParserRuleContext()) == 1 { - { - p.SetState(1626) - p.Schema_name() - } - - } - p.SetState(1631) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserINTO_ { - { - p.SetState(1629) - p.Match(SQLiteParserINTO_) - } - { - p.SetState(1630) - p.Filename() - } - - } - - return localctx -} - -// IFilter_clauseContext is an interface to support dynamic dispatch. -type IFilter_clauseContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
- GetParser() antlr.Parser - - // Getter signatures - FILTER_() antlr.TerminalNode - OPEN_PAR() antlr.TerminalNode - WHERE_() antlr.TerminalNode - Expr() IExprContext - CLOSE_PAR() antlr.TerminalNode - - // IsFilter_clauseContext differentiates from other interfaces. - IsFilter_clauseContext() -} - -type Filter_clauseContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyFilter_clauseContext() *Filter_clauseContext { - var p = new(Filter_clauseContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_filter_clause - return p -} - -func (*Filter_clauseContext) IsFilter_clauseContext() {} - -func NewFilter_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Filter_clauseContext { - var p = new(Filter_clauseContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_filter_clause - - return p -} - -func (s *Filter_clauseContext) GetParser() antlr.Parser { return s.parser } - -func (s *Filter_clauseContext) FILTER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFILTER_, 0) -} - -func (s *Filter_clauseContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Filter_clauseContext) WHERE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWHERE_, 0) -} - -func (s *Filter_clauseContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Filter_clauseContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Filter_clauseContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Filter_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Filter_clauseContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterFilter_clause(s) - } -} - -func (s *Filter_clauseContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitFilter_clause(s) - } -} - -func (p *SQLiteParser) Filter_clause() (localctx IFilter_clauseContext) { - this := p - _ = this - - localctx = NewFilter_clauseContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 118, SQLiteParserRULE_filter_clause) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1633) - p.Match(SQLiteParserFILTER_) - } - { - p.SetState(1634) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1635) - p.Match(SQLiteParserWHERE_) - } - { - p.SetState(1636) - p.expr(0) - } - { - p.SetState(1637) - p.Match(SQLiteParserCLOSE_PAR) - } - - return localctx -} - -// IWindow_defnContext is an interface to support dynamic dispatch. -type IWindow_defnContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
- GetParser() antlr.Parser - - // Getter signatures - OPEN_PAR() antlr.TerminalNode - CLOSE_PAR() antlr.TerminalNode - ORDER_() antlr.TerminalNode - AllBY_() []antlr.TerminalNode - BY_(i int) antlr.TerminalNode - AllOrdering_term() []IOrdering_termContext - Ordering_term(i int) IOrdering_termContext - Base_window_name() IBase_window_nameContext - PARTITION_() antlr.TerminalNode - AllExpr() []IExprContext - Expr(i int) IExprContext - Frame_spec() IFrame_specContext - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsWindow_defnContext differentiates from other interfaces. - IsWindow_defnContext() -} - -type Window_defnContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyWindow_defnContext() *Window_defnContext { - var p = new(Window_defnContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_window_defn - return p -} - -func (*Window_defnContext) IsWindow_defnContext() {} - -func NewWindow_defnContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Window_defnContext { - var p = new(Window_defnContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_window_defn - - return p -} - -func (s *Window_defnContext) GetParser() antlr.Parser { return s.parser } - -func (s *Window_defnContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Window_defnContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Window_defnContext) ORDER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserORDER_, 0) -} - -func (s *Window_defnContext) AllBY_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserBY_) -} - -func (s *Window_defnContext) BY_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserBY_, i) -} - -func (s *Window_defnContext) AllOrdering_term() []IOrdering_termContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IOrdering_termContext); ok { - len++ - } - } - - tst := make([]IOrdering_termContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IOrdering_termContext); ok { - tst[i] = t.(IOrdering_termContext) - i++ - } - } - - return tst -} - -func (s *Window_defnContext) Ordering_term(i int) IOrdering_termContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IOrdering_termContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IOrdering_termContext) -} - -func (s *Window_defnContext) Base_window_name() IBase_window_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IBase_window_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IBase_window_nameContext) -} - -func (s *Window_defnContext) PARTITION_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPARTITION_, 0) -} - -func (s *Window_defnContext) AllExpr() []IExprContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IExprContext); ok { - len++ - } - } - - tst := make([]IExprContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IExprContext); ok { - tst[i] = t.(IExprContext) - i++ - } - } - - return tst -} - -func (s *Window_defnContext) Expr(i 
int) IExprContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Window_defnContext) Frame_spec() IFrame_specContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IFrame_specContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IFrame_specContext) -} - -func (s *Window_defnContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Window_defnContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Window_defnContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Window_defnContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Window_defnContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterWindow_defn(s) - } -} - -func (s *Window_defnContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitWindow_defn(s) - } -} - -func (p *SQLiteParser) Window_defn() (localctx IWindow_defnContext) { - this := p - _ = this - - localctx = NewWindow_defnContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 120, SQLiteParserRULE_window_defn) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1639) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1641) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 242, p.GetParserRuleContext()) == 1 { - { - p.SetState(1640) - p.Base_window_name() - } - - } - p.SetState(1653) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserPARTITION_ { - { - p.SetState(1643) - p.Match(SQLiteParserPARTITION_) - } - { - p.SetState(1644) - p.Match(SQLiteParserBY_) - } - { - p.SetState(1645) - p.expr(0) - } - p.SetState(1650) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1646) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1647) - p.expr(0) - } - - p.SetState(1652) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - } - - { - p.SetState(1655) - p.Match(SQLiteParserORDER_) - } - { - p.SetState(1656) - p.Match(SQLiteParserBY_) - } - { - p.SetState(1657) - p.Ordering_term() - } - p.SetState(1662) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1658) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1659) - p.Ordering_term() - } - - p.SetState(1664) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - p.SetState(1666) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if (int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&2251799880794113) != 0 { - { - p.SetState(1665) - p.Frame_spec() - } - - } - { - p.SetState(1668) - p.Match(SQLiteParserCLOSE_PAR) - } - - return 
localctx -} - -// IOver_clauseContext is an interface to support dynamic dispatch. -type IOver_clauseContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - OVER_() antlr.TerminalNode - Window_name() IWindow_nameContext - OPEN_PAR() antlr.TerminalNode - CLOSE_PAR() antlr.TerminalNode - Base_window_name() IBase_window_nameContext - PARTITION_() antlr.TerminalNode - AllBY_() []antlr.TerminalNode - BY_(i int) antlr.TerminalNode - AllExpr() []IExprContext - Expr(i int) IExprContext - ORDER_() antlr.TerminalNode - AllOrdering_term() []IOrdering_termContext - Ordering_term(i int) IOrdering_termContext - Frame_spec() IFrame_specContext - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsOver_clauseContext differentiates from other interfaces. - IsOver_clauseContext() -} - -type Over_clauseContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyOver_clauseContext() *Over_clauseContext { - var p = new(Over_clauseContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_over_clause - return p -} - -func (*Over_clauseContext) IsOver_clauseContext() {} - -func NewOver_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Over_clauseContext { - var p = new(Over_clauseContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_over_clause - - return p -} - -func (s *Over_clauseContext) GetParser() antlr.Parser { return s.parser } - -func (s *Over_clauseContext) OVER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOVER_, 0) -} - -func (s *Over_clauseContext) Window_name() IWindow_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IWindow_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IWindow_nameContext) -} - -func (s *Over_clauseContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Over_clauseContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Over_clauseContext) Base_window_name() IBase_window_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IBase_window_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IBase_window_nameContext) -} - -func (s *Over_clauseContext) PARTITION_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPARTITION_, 0) -} - -func (s *Over_clauseContext) AllBY_() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserBY_) -} - -func (s *Over_clauseContext) BY_(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserBY_, i) -} - -func (s *Over_clauseContext) AllExpr() []IExprContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IExprContext); ok { - len++ - } - } - - tst := make([]IExprContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IExprContext); ok { - tst[i] = t.(IExprContext) - i++ - } - } - - return tst -} - -func (s *Over_clauseContext) Expr(i int) IExprContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { 
- return nil - } - - return t.(IExprContext) -} - -func (s *Over_clauseContext) ORDER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserORDER_, 0) -} - -func (s *Over_clauseContext) AllOrdering_term() []IOrdering_termContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IOrdering_termContext); ok { - len++ - } - } - - tst := make([]IOrdering_termContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IOrdering_termContext); ok { - tst[i] = t.(IOrdering_termContext) - i++ - } - } - - return tst -} - -func (s *Over_clauseContext) Ordering_term(i int) IOrdering_termContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IOrdering_termContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IOrdering_termContext) -} - -func (s *Over_clauseContext) Frame_spec() IFrame_specContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IFrame_specContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IFrame_specContext) -} - -func (s *Over_clauseContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Over_clauseContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Over_clauseContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Over_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Over_clauseContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterOver_clause(s) - } -} - -func (s *Over_clauseContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitOver_clause(s) - } -} - -func (p *SQLiteParser) Over_clause() (localctx IOver_clauseContext) { - this := p - _ = this - - localctx = NewOver_clauseContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 122, SQLiteParserRULE_over_clause) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1670) - p.Match(SQLiteParserOVER_) - } - p.SetState(1704) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 253, p.GetParserRuleContext()) { - case 1: - { - p.SetState(1671) - p.Window_name() - } - - case 2: - { - p.SetState(1672) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1674) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 247, p.GetParserRuleContext()) == 1 { - { - p.SetState(1673) - p.Base_window_name() - } - - } - p.SetState(1686) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserPARTITION_ { - { - p.SetState(1676) - p.Match(SQLiteParserPARTITION_) - } - { - p.SetState(1677) - p.Match(SQLiteParserBY_) - } - { - p.SetState(1678) - p.expr(0) - } - p.SetState(1683) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1679) - 
p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1680) - p.expr(0) - } - - p.SetState(1685) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - } - p.SetState(1698) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserORDER_ { - { - p.SetState(1688) - p.Match(SQLiteParserORDER_) - } - { - p.SetState(1689) - p.Match(SQLiteParserBY_) - } - { - p.SetState(1690) - p.Ordering_term() - } - p.SetState(1695) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1691) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1692) - p.Ordering_term() - } - - p.SetState(1697) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - } - p.SetState(1701) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if (int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&2251799880794113) != 0 { - { - p.SetState(1700) - p.Frame_spec() - } - - } - { - p.SetState(1703) - p.Match(SQLiteParserCLOSE_PAR) - } - - } - - return localctx -} - -// IFrame_specContext is an interface to support dynamic dispatch. -type IFrame_specContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Frame_clause() IFrame_clauseContext - EXCLUDE_() antlr.TerminalNode - NO_() antlr.TerminalNode - OTHERS_() antlr.TerminalNode - CURRENT_() antlr.TerminalNode - ROW_() antlr.TerminalNode - GROUP_() antlr.TerminalNode - TIES_() antlr.TerminalNode - - // IsFrame_specContext differentiates from other interfaces. - IsFrame_specContext() -} - -type Frame_specContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyFrame_specContext() *Frame_specContext { - var p = new(Frame_specContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_frame_spec - return p -} - -func (*Frame_specContext) IsFrame_specContext() {} - -func NewFrame_specContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Frame_specContext { - var p = new(Frame_specContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_frame_spec - - return p -} - -func (s *Frame_specContext) GetParser() antlr.Parser { return s.parser } - -func (s *Frame_specContext) Frame_clause() IFrame_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IFrame_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IFrame_clauseContext) -} - -func (s *Frame_specContext) EXCLUDE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXCLUDE_, 0) -} - -func (s *Frame_specContext) NO_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNO_, 0) -} - -func (s *Frame_specContext) OTHERS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOTHERS_, 0) -} - -func (s *Frame_specContext) CURRENT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCURRENT_, 0) -} - -func (s *Frame_specContext) ROW_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROW_, 0) -} - -func (s *Frame_specContext) GROUP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserGROUP_, 0) -} - -func (s *Frame_specContext) TIES_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTIES_, 0) -} - -func (s *Frame_specContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Frame_specContext) 
ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Frame_specContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterFrame_spec(s) - } -} - -func (s *Frame_specContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitFrame_spec(s) - } -} - -func (p *SQLiteParser) Frame_spec() (localctx IFrame_specContext) { - this := p - _ = this - - localctx = NewFrame_specContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 124, SQLiteParserRULE_frame_spec) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1706) - p.Frame_clause() - } - p.SetState(1716) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserEXCLUDE_ { - { - p.SetState(1707) - p.Match(SQLiteParserEXCLUDE_) - } - p.SetState(1714) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserNO_: - { - p.SetState(1708) - p.Match(SQLiteParserNO_) - } - { - p.SetState(1709) - p.Match(SQLiteParserOTHERS_) - } - - case SQLiteParserCURRENT_: - { - p.SetState(1710) - p.Match(SQLiteParserCURRENT_) - } - { - p.SetState(1711) - p.Match(SQLiteParserROW_) - } - - case SQLiteParserGROUP_: - { - p.SetState(1712) - p.Match(SQLiteParserGROUP_) - } - - case SQLiteParserTIES_: - { - p.SetState(1713) - p.Match(SQLiteParserTIES_) - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - } - - return localctx -} - -// IFrame_clauseContext is an interface to support dynamic dispatch. -type IFrame_clauseContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - RANGE_() antlr.TerminalNode - ROWS_() antlr.TerminalNode - GROUPS_() antlr.TerminalNode - Frame_single() IFrame_singleContext - BETWEEN_() antlr.TerminalNode - Frame_left() IFrame_leftContext - AND_() antlr.TerminalNode - Frame_right() IFrame_rightContext - - // IsFrame_clauseContext differentiates from other interfaces. 
- IsFrame_clauseContext() -} - -type Frame_clauseContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyFrame_clauseContext() *Frame_clauseContext { - var p = new(Frame_clauseContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_frame_clause - return p -} - -func (*Frame_clauseContext) IsFrame_clauseContext() {} - -func NewFrame_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Frame_clauseContext { - var p = new(Frame_clauseContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_frame_clause - - return p -} - -func (s *Frame_clauseContext) GetParser() antlr.Parser { return s.parser } - -func (s *Frame_clauseContext) RANGE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRANGE_, 0) -} - -func (s *Frame_clauseContext) ROWS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROWS_, 0) -} - -func (s *Frame_clauseContext) GROUPS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserGROUPS_, 0) -} - -func (s *Frame_clauseContext) Frame_single() IFrame_singleContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IFrame_singleContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IFrame_singleContext) -} - -func (s *Frame_clauseContext) BETWEEN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserBETWEEN_, 0) -} - -func (s *Frame_clauseContext) Frame_left() IFrame_leftContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IFrame_leftContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IFrame_leftContext) -} - -func (s *Frame_clauseContext) AND_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAND_, 0) -} - -func (s *Frame_clauseContext) Frame_right() IFrame_rightContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IFrame_rightContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IFrame_rightContext) -} - -func (s *Frame_clauseContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Frame_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Frame_clauseContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterFrame_clause(s) - } -} - -func (s *Frame_clauseContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitFrame_clause(s) - } -} - -func (p *SQLiteParser) Frame_clause() (localctx IFrame_clauseContext) { - this := p - _ = this - - localctx = NewFrame_clauseContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 126, SQLiteParserRULE_frame_clause) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1718) - _la = p.GetTokenStream().LA(1) - - if !((int64((_la-128)) & ^0x3f) == 0 && 
((int64(1)<<(_la-128))&2251799880794113) != 0) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - p.SetState(1725) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 256, p.GetParserRuleContext()) { - case 1: - { - p.SetState(1719) - p.Frame_single() - } - - case 2: - { - p.SetState(1720) - p.Match(SQLiteParserBETWEEN_) - } - { - p.SetState(1721) - p.Frame_left() - } - { - p.SetState(1722) - p.Match(SQLiteParserAND_) - } - { - p.SetState(1723) - p.Frame_right() - } - - } - - return localctx -} - -// ISimple_function_invocationContext is an interface to support dynamic dispatch. -type ISimple_function_invocationContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Simple_func() ISimple_funcContext - OPEN_PAR() antlr.TerminalNode - CLOSE_PAR() antlr.TerminalNode - AllExpr() []IExprContext - Expr(i int) IExprContext - STAR() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsSimple_function_invocationContext differentiates from other interfaces. - IsSimple_function_invocationContext() -} - -type Simple_function_invocationContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptySimple_function_invocationContext() *Simple_function_invocationContext { - var p = new(Simple_function_invocationContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_simple_function_invocation - return p -} - -func (*Simple_function_invocationContext) IsSimple_function_invocationContext() {} - -func NewSimple_function_invocationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Simple_function_invocationContext { - var p = new(Simple_function_invocationContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_simple_function_invocation - - return p -} - -func (s *Simple_function_invocationContext) GetParser() antlr.Parser { return s.parser } - -func (s *Simple_function_invocationContext) Simple_func() ISimple_funcContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISimple_funcContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISimple_funcContext) -} - -func (s *Simple_function_invocationContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Simple_function_invocationContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Simple_function_invocationContext) AllExpr() []IExprContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IExprContext); ok { - len++ - } - } - - tst := make([]IExprContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IExprContext); ok { - tst[i] = t.(IExprContext) - i++ - } - } - - return tst -} - -func (s *Simple_function_invocationContext) Expr(i int) IExprContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Simple_function_invocationContext) STAR() antlr.TerminalNode { - return 
s.GetToken(SQLiteParserSTAR, 0) -} - -func (s *Simple_function_invocationContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Simple_function_invocationContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Simple_function_invocationContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Simple_function_invocationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Simple_function_invocationContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterSimple_function_invocation(s) - } -} - -func (s *Simple_function_invocationContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitSimple_function_invocation(s) - } -} - -func (p *SQLiteParser) Simple_function_invocation() (localctx ISimple_function_invocationContext) { - this := p - _ = this - - localctx = NewSimple_function_invocationContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 128, SQLiteParserRULE_simple_function_invocation) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1727) - p.Simple_func() - } - { - p.SetState(1728) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1738) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserOPEN_PAR, SQLiteParserPLUS, SQLiteParserMINUS, SQLiteParserTILDE, SQLiteParserABORT_, SQLiteParserACTION_, SQLiteParserADD_, SQLiteParserAFTER_, SQLiteParserALL_, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserAND_, SQLiteParserAS_, SQLiteParserASC_, SQLiteParserATTACH_, SQLiteParserAUTOINCREMENT_, SQLiteParserBEFORE_, SQLiteParserBEGIN_, SQLiteParserBETWEEN_, SQLiteParserBY_, SQLiteParserCASCADE_, SQLiteParserCASE_, SQLiteParserCAST_, SQLiteParserCHECK_, SQLiteParserCOLLATE_, SQLiteParserCOLUMN_, SQLiteParserCOMMIT_, SQLiteParserCONFLICT_, SQLiteParserCONSTRAINT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserCURRENT_DATE_, SQLiteParserCURRENT_TIME_, SQLiteParserCURRENT_TIMESTAMP_, SQLiteParserDATABASE_, SQLiteParserDEFAULT_, SQLiteParserDEFERRABLE_, SQLiteParserDEFERRED_, SQLiteParserDELETE_, SQLiteParserDESC_, SQLiteParserDETACH_, SQLiteParserDISTINCT_, SQLiteParserDROP_, SQLiteParserEACH_, SQLiteParserELSE_, SQLiteParserEND_, SQLiteParserESCAPE_, SQLiteParserEXCEPT_, SQLiteParserEXCLUSIVE_, SQLiteParserEXISTS_, SQLiteParserEXPLAIN_, SQLiteParserFAIL_, SQLiteParserFOR_, SQLiteParserFOREIGN_, SQLiteParserFROM_, SQLiteParserFULL_, SQLiteParserGLOB_, SQLiteParserGROUP_, SQLiteParserHAVING_, SQLiteParserIF_, SQLiteParserIGNORE_, SQLiteParserIMMEDIATE_, SQLiteParserIN_, SQLiteParserINDEX_, SQLiteParserINDEXED_, SQLiteParserINITIALLY_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINSTEAD_, SQLiteParserINTERSECT_, SQLiteParserINTO_, SQLiteParserIS_, SQLiteParserISNULL_, SQLiteParserJOIN_, SQLiteParserKEY_, SQLiteParserLEFT_, SQLiteParserLIKE_, SQLiteParserLIMIT_, SQLiteParserMATCH_, SQLiteParserNATURAL_, SQLiteParserNO_, SQLiteParserNOT_, SQLiteParserNOTNULL_, SQLiteParserNULL_, SQLiteParserOF_, 
SQLiteParserOFFSET_, SQLiteParserON_, SQLiteParserOR_, SQLiteParserORDER_, SQLiteParserOUTER_, SQLiteParserPLAN_, SQLiteParserPRAGMA_, SQLiteParserPRIMARY_, SQLiteParserQUERY_, SQLiteParserRAISE_, SQLiteParserRECURSIVE_, SQLiteParserREFERENCES_, SQLiteParserREGEXP_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserRENAME_, SQLiteParserREPLACE_, SQLiteParserRESTRICT_, SQLiteParserRIGHT_, SQLiteParserROLLBACK_, SQLiteParserROW_, SQLiteParserROWS_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserTABLE_, SQLiteParserTEMP_, SQLiteParserTEMPORARY_, SQLiteParserTHEN_, SQLiteParserTO_, SQLiteParserTRANSACTION_, SQLiteParserTRIGGER_, SQLiteParserUNION_, SQLiteParserUNIQUE_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserVIEW_, SQLiteParserVIRTUAL_, SQLiteParserWHEN_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWITHOUT_, SQLiteParserFIRST_VALUE_, SQLiteParserOVER_, SQLiteParserPARTITION_, SQLiteParserRANGE_, SQLiteParserPRECEDING_, SQLiteParserUNBOUNDED_, SQLiteParserCURRENT_, SQLiteParserFOLLOWING_, SQLiteParserCUME_DIST_, SQLiteParserDENSE_RANK_, SQLiteParserLAG_, SQLiteParserLAST_VALUE_, SQLiteParserLEAD_, SQLiteParserNTH_VALUE_, SQLiteParserNTILE_, SQLiteParserPERCENT_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_, SQLiteParserGENERATED_, SQLiteParserALWAYS_, SQLiteParserSTORED_, SQLiteParserTRUE_, SQLiteParserFALSE_, SQLiteParserWINDOW_, SQLiteParserNULLS_, SQLiteParserFIRST_, SQLiteParserLAST_, SQLiteParserFILTER_, SQLiteParserGROUPS_, SQLiteParserEXCLUDE_, SQLiteParserIDENTIFIER, SQLiteParserNUMERIC_LITERAL, SQLiteParserBIND_PARAMETER, SQLiteParserSTRING_LITERAL, SQLiteParserBLOB_LITERAL: - { - p.SetState(1729) - p.expr(0) - } - p.SetState(1734) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1730) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1731) - p.expr(0) - } - - p.SetState(1736) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - case SQLiteParserSTAR: - { - p.SetState(1737) - p.Match(SQLiteParserSTAR) - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - { - p.SetState(1740) - p.Match(SQLiteParserCLOSE_PAR) - } - - return localctx -} - -// IAggregate_function_invocationContext is an interface to support dynamic dispatch. -type IAggregate_function_invocationContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Aggregate_func() IAggregate_funcContext - OPEN_PAR() antlr.TerminalNode - CLOSE_PAR() antlr.TerminalNode - AllExpr() []IExprContext - Expr(i int) IExprContext - STAR() antlr.TerminalNode - Filter_clause() IFilter_clauseContext - DISTINCT_() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsAggregate_function_invocationContext differentiates from other interfaces. 
- IsAggregate_function_invocationContext() -} - -type Aggregate_function_invocationContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyAggregate_function_invocationContext() *Aggregate_function_invocationContext { - var p = new(Aggregate_function_invocationContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_aggregate_function_invocation - return p -} - -func (*Aggregate_function_invocationContext) IsAggregate_function_invocationContext() {} - -func NewAggregate_function_invocationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Aggregate_function_invocationContext { - var p = new(Aggregate_function_invocationContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_aggregate_function_invocation - - return p -} - -func (s *Aggregate_function_invocationContext) GetParser() antlr.Parser { return s.parser } - -func (s *Aggregate_function_invocationContext) Aggregate_func() IAggregate_funcContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAggregate_funcContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAggregate_funcContext) -} - -func (s *Aggregate_function_invocationContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Aggregate_function_invocationContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Aggregate_function_invocationContext) AllExpr() []IExprContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IExprContext); ok { - len++ - } - } - - tst := make([]IExprContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IExprContext); ok { - tst[i] = t.(IExprContext) - i++ - } - } - - return tst -} - -func (s *Aggregate_function_invocationContext) Expr(i int) IExprContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Aggregate_function_invocationContext) STAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserSTAR, 0) -} - -func (s *Aggregate_function_invocationContext) Filter_clause() IFilter_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IFilter_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IFilter_clauseContext) -} - -func (s *Aggregate_function_invocationContext) DISTINCT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDISTINCT_, 0) -} - -func (s *Aggregate_function_invocationContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Aggregate_function_invocationContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Aggregate_function_invocationContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Aggregate_function_invocationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Aggregate_function_invocationContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, 
ok := listener.(SQLiteParserListener); ok { - listenerT.EnterAggregate_function_invocation(s) - } -} - -func (s *Aggregate_function_invocationContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitAggregate_function_invocation(s) - } -} - -func (p *SQLiteParser) Aggregate_function_invocation() (localctx IAggregate_function_invocationContext) { - this := p - _ = this - - localctx = NewAggregate_function_invocationContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 130, SQLiteParserRULE_aggregate_function_invocation) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1742) - p.Aggregate_func() - } - { - p.SetState(1743) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1756) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserOPEN_PAR, SQLiteParserPLUS, SQLiteParserMINUS, SQLiteParserTILDE, SQLiteParserABORT_, SQLiteParserACTION_, SQLiteParserADD_, SQLiteParserAFTER_, SQLiteParserALL_, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserAND_, SQLiteParserAS_, SQLiteParserASC_, SQLiteParserATTACH_, SQLiteParserAUTOINCREMENT_, SQLiteParserBEFORE_, SQLiteParserBEGIN_, SQLiteParserBETWEEN_, SQLiteParserBY_, SQLiteParserCASCADE_, SQLiteParserCASE_, SQLiteParserCAST_, SQLiteParserCHECK_, SQLiteParserCOLLATE_, SQLiteParserCOLUMN_, SQLiteParserCOMMIT_, SQLiteParserCONFLICT_, SQLiteParserCONSTRAINT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserCURRENT_DATE_, SQLiteParserCURRENT_TIME_, SQLiteParserCURRENT_TIMESTAMP_, SQLiteParserDATABASE_, SQLiteParserDEFAULT_, SQLiteParserDEFERRABLE_, SQLiteParserDEFERRED_, SQLiteParserDELETE_, SQLiteParserDESC_, SQLiteParserDETACH_, SQLiteParserDISTINCT_, SQLiteParserDROP_, SQLiteParserEACH_, SQLiteParserELSE_, SQLiteParserEND_, SQLiteParserESCAPE_, SQLiteParserEXCEPT_, SQLiteParserEXCLUSIVE_, SQLiteParserEXISTS_, SQLiteParserEXPLAIN_, SQLiteParserFAIL_, SQLiteParserFOR_, SQLiteParserFOREIGN_, SQLiteParserFROM_, SQLiteParserFULL_, SQLiteParserGLOB_, SQLiteParserGROUP_, SQLiteParserHAVING_, SQLiteParserIF_, SQLiteParserIGNORE_, SQLiteParserIMMEDIATE_, SQLiteParserIN_, SQLiteParserINDEX_, SQLiteParserINDEXED_, SQLiteParserINITIALLY_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINSTEAD_, SQLiteParserINTERSECT_, SQLiteParserINTO_, SQLiteParserIS_, SQLiteParserISNULL_, SQLiteParserJOIN_, SQLiteParserKEY_, SQLiteParserLEFT_, SQLiteParserLIKE_, SQLiteParserLIMIT_, SQLiteParserMATCH_, SQLiteParserNATURAL_, SQLiteParserNO_, SQLiteParserNOT_, SQLiteParserNOTNULL_, SQLiteParserNULL_, SQLiteParserOF_, SQLiteParserOFFSET_, SQLiteParserON_, SQLiteParserOR_, SQLiteParserORDER_, SQLiteParserOUTER_, SQLiteParserPLAN_, SQLiteParserPRAGMA_, SQLiteParserPRIMARY_, SQLiteParserQUERY_, SQLiteParserRAISE_, SQLiteParserRECURSIVE_, SQLiteParserREFERENCES_, SQLiteParserREGEXP_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserRENAME_, SQLiteParserREPLACE_, SQLiteParserRESTRICT_, SQLiteParserRIGHT_, SQLiteParserROLLBACK_, SQLiteParserROW_, SQLiteParserROWS_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserTABLE_, SQLiteParserTEMP_, SQLiteParserTEMPORARY_, SQLiteParserTHEN_, SQLiteParserTO_, 
SQLiteParserTRANSACTION_, SQLiteParserTRIGGER_, SQLiteParserUNION_, SQLiteParserUNIQUE_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserVIEW_, SQLiteParserVIRTUAL_, SQLiteParserWHEN_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWITHOUT_, SQLiteParserFIRST_VALUE_, SQLiteParserOVER_, SQLiteParserPARTITION_, SQLiteParserRANGE_, SQLiteParserPRECEDING_, SQLiteParserUNBOUNDED_, SQLiteParserCURRENT_, SQLiteParserFOLLOWING_, SQLiteParserCUME_DIST_, SQLiteParserDENSE_RANK_, SQLiteParserLAG_, SQLiteParserLAST_VALUE_, SQLiteParserLEAD_, SQLiteParserNTH_VALUE_, SQLiteParserNTILE_, SQLiteParserPERCENT_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_, SQLiteParserGENERATED_, SQLiteParserALWAYS_, SQLiteParserSTORED_, SQLiteParserTRUE_, SQLiteParserFALSE_, SQLiteParserWINDOW_, SQLiteParserNULLS_, SQLiteParserFIRST_, SQLiteParserLAST_, SQLiteParserFILTER_, SQLiteParserGROUPS_, SQLiteParserEXCLUDE_, SQLiteParserIDENTIFIER, SQLiteParserNUMERIC_LITERAL, SQLiteParserBIND_PARAMETER, SQLiteParserSTRING_LITERAL, SQLiteParserBLOB_LITERAL: - p.SetState(1745) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 259, p.GetParserRuleContext()) == 1 { - { - p.SetState(1744) - p.Match(SQLiteParserDISTINCT_) - } - - } - { - p.SetState(1747) - p.expr(0) - } - p.SetState(1752) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1748) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1749) - p.expr(0) - } - - p.SetState(1754) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - case SQLiteParserSTAR: - { - p.SetState(1755) - p.Match(SQLiteParserSTAR) - } - - case SQLiteParserCLOSE_PAR: - - default: - } - { - p.SetState(1758) - p.Match(SQLiteParserCLOSE_PAR) - } - p.SetState(1760) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserFILTER_ { - { - p.SetState(1759) - p.Filter_clause() - } - - } - - return localctx -} - -// IWindow_function_invocationContext is an interface to support dynamic dispatch. -type IWindow_function_invocationContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Window_function() IWindow_functionContext - OPEN_PAR() antlr.TerminalNode - CLOSE_PAR() antlr.TerminalNode - OVER_() antlr.TerminalNode - Window_defn() IWindow_defnContext - Window_name() IWindow_nameContext - AllExpr() []IExprContext - Expr(i int) IExprContext - STAR() antlr.TerminalNode - Filter_clause() IFilter_clauseContext - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsWindow_function_invocationContext differentiates from other interfaces. 
- IsWindow_function_invocationContext() -} - -type Window_function_invocationContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyWindow_function_invocationContext() *Window_function_invocationContext { - var p = new(Window_function_invocationContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_window_function_invocation - return p -} - -func (*Window_function_invocationContext) IsWindow_function_invocationContext() {} - -func NewWindow_function_invocationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Window_function_invocationContext { - var p = new(Window_function_invocationContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_window_function_invocation - - return p -} - -func (s *Window_function_invocationContext) GetParser() antlr.Parser { return s.parser } - -func (s *Window_function_invocationContext) Window_function() IWindow_functionContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IWindow_functionContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IWindow_functionContext) -} - -func (s *Window_function_invocationContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Window_function_invocationContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Window_function_invocationContext) OVER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOVER_, 0) -} - -func (s *Window_function_invocationContext) Window_defn() IWindow_defnContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IWindow_defnContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IWindow_defnContext) -} - -func (s *Window_function_invocationContext) Window_name() IWindow_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IWindow_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IWindow_nameContext) -} - -func (s *Window_function_invocationContext) AllExpr() []IExprContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IExprContext); ok { - len++ - } - } - - tst := make([]IExprContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IExprContext); ok { - tst[i] = t.(IExprContext) - i++ - } - } - - return tst -} - -func (s *Window_function_invocationContext) Expr(i int) IExprContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Window_function_invocationContext) STAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserSTAR, 0) -} - -func (s *Window_function_invocationContext) Filter_clause() IFilter_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IFilter_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IFilter_clauseContext) -} - -func (s *Window_function_invocationContext) AllCOMMA() []antlr.TerminalNode { 
- return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Window_function_invocationContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Window_function_invocationContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Window_function_invocationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Window_function_invocationContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterWindow_function_invocation(s) - } -} - -func (s *Window_function_invocationContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitWindow_function_invocation(s) - } -} - -func (p *SQLiteParser) Window_function_invocation() (localctx IWindow_function_invocationContext) { - this := p - _ = this - - localctx = NewWindow_function_invocationContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 132, SQLiteParserRULE_window_function_invocation) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1762) - p.Window_function() - } - { - p.SetState(1763) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1773) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserOPEN_PAR, SQLiteParserPLUS, SQLiteParserMINUS, SQLiteParserTILDE, SQLiteParserABORT_, SQLiteParserACTION_, SQLiteParserADD_, SQLiteParserAFTER_, SQLiteParserALL_, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserAND_, SQLiteParserAS_, SQLiteParserASC_, SQLiteParserATTACH_, SQLiteParserAUTOINCREMENT_, SQLiteParserBEFORE_, SQLiteParserBEGIN_, SQLiteParserBETWEEN_, SQLiteParserBY_, SQLiteParserCASCADE_, SQLiteParserCASE_, SQLiteParserCAST_, SQLiteParserCHECK_, SQLiteParserCOLLATE_, SQLiteParserCOLUMN_, SQLiteParserCOMMIT_, SQLiteParserCONFLICT_, SQLiteParserCONSTRAINT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserCURRENT_DATE_, SQLiteParserCURRENT_TIME_, SQLiteParserCURRENT_TIMESTAMP_, SQLiteParserDATABASE_, SQLiteParserDEFAULT_, SQLiteParserDEFERRABLE_, SQLiteParserDEFERRED_, SQLiteParserDELETE_, SQLiteParserDESC_, SQLiteParserDETACH_, SQLiteParserDISTINCT_, SQLiteParserDROP_, SQLiteParserEACH_, SQLiteParserELSE_, SQLiteParserEND_, SQLiteParserESCAPE_, SQLiteParserEXCEPT_, SQLiteParserEXCLUSIVE_, SQLiteParserEXISTS_, SQLiteParserEXPLAIN_, SQLiteParserFAIL_, SQLiteParserFOR_, SQLiteParserFOREIGN_, SQLiteParserFROM_, SQLiteParserFULL_, SQLiteParserGLOB_, SQLiteParserGROUP_, SQLiteParserHAVING_, SQLiteParserIF_, SQLiteParserIGNORE_, SQLiteParserIMMEDIATE_, SQLiteParserIN_, SQLiteParserINDEX_, SQLiteParserINDEXED_, SQLiteParserINITIALLY_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINSTEAD_, SQLiteParserINTERSECT_, SQLiteParserINTO_, SQLiteParserIS_, SQLiteParserISNULL_, SQLiteParserJOIN_, SQLiteParserKEY_, SQLiteParserLEFT_, SQLiteParserLIKE_, SQLiteParserLIMIT_, SQLiteParserMATCH_, SQLiteParserNATURAL_, SQLiteParserNO_, SQLiteParserNOT_, SQLiteParserNOTNULL_, SQLiteParserNULL_, SQLiteParserOF_, SQLiteParserOFFSET_, SQLiteParserON_, SQLiteParserOR_, SQLiteParserORDER_, SQLiteParserOUTER_, SQLiteParserPLAN_, 
SQLiteParserPRAGMA_, SQLiteParserPRIMARY_, SQLiteParserQUERY_, SQLiteParserRAISE_, SQLiteParserRECURSIVE_, SQLiteParserREFERENCES_, SQLiteParserREGEXP_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserRENAME_, SQLiteParserREPLACE_, SQLiteParserRESTRICT_, SQLiteParserRIGHT_, SQLiteParserROLLBACK_, SQLiteParserROW_, SQLiteParserROWS_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserTABLE_, SQLiteParserTEMP_, SQLiteParserTEMPORARY_, SQLiteParserTHEN_, SQLiteParserTO_, SQLiteParserTRANSACTION_, SQLiteParserTRIGGER_, SQLiteParserUNION_, SQLiteParserUNIQUE_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserVIEW_, SQLiteParserVIRTUAL_, SQLiteParserWHEN_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWITHOUT_, SQLiteParserFIRST_VALUE_, SQLiteParserOVER_, SQLiteParserPARTITION_, SQLiteParserRANGE_, SQLiteParserPRECEDING_, SQLiteParserUNBOUNDED_, SQLiteParserCURRENT_, SQLiteParserFOLLOWING_, SQLiteParserCUME_DIST_, SQLiteParserDENSE_RANK_, SQLiteParserLAG_, SQLiteParserLAST_VALUE_, SQLiteParserLEAD_, SQLiteParserNTH_VALUE_, SQLiteParserNTILE_, SQLiteParserPERCENT_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_, SQLiteParserGENERATED_, SQLiteParserALWAYS_, SQLiteParserSTORED_, SQLiteParserTRUE_, SQLiteParserFALSE_, SQLiteParserWINDOW_, SQLiteParserNULLS_, SQLiteParserFIRST_, SQLiteParserLAST_, SQLiteParserFILTER_, SQLiteParserGROUPS_, SQLiteParserEXCLUDE_, SQLiteParserIDENTIFIER, SQLiteParserNUMERIC_LITERAL, SQLiteParserBIND_PARAMETER, SQLiteParserSTRING_LITERAL, SQLiteParserBLOB_LITERAL: - { - p.SetState(1764) - p.expr(0) - } - p.SetState(1769) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1765) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1766) - p.expr(0) - } - - p.SetState(1771) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - case SQLiteParserSTAR: - { - p.SetState(1772) - p.Match(SQLiteParserSTAR) - } - - case SQLiteParserCLOSE_PAR: - - default: - } - { - p.SetState(1775) - p.Match(SQLiteParserCLOSE_PAR) - } - p.SetState(1777) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserFILTER_ { - { - p.SetState(1776) - p.Filter_clause() - } - - } - { - p.SetState(1779) - p.Match(SQLiteParserOVER_) - } - p.SetState(1782) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 266, p.GetParserRuleContext()) { - case 1: - { - p.SetState(1780) - p.Window_defn() - } - - case 2: - { - p.SetState(1781) - p.Window_name() - } - - } - - return localctx -} - -// ICommon_table_stmtContext is an interface to support dynamic dispatch. -type ICommon_table_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - WITH_() antlr.TerminalNode - AllCommon_table_expression() []ICommon_table_expressionContext - Common_table_expression(i int) ICommon_table_expressionContext - RECURSIVE_() antlr.TerminalNode - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsCommon_table_stmtContext differentiates from other interfaces. 
- IsCommon_table_stmtContext() -} - -type Common_table_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyCommon_table_stmtContext() *Common_table_stmtContext { - var p = new(Common_table_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_common_table_stmt - return p -} - -func (*Common_table_stmtContext) IsCommon_table_stmtContext() {} - -func NewCommon_table_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Common_table_stmtContext { - var p = new(Common_table_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_common_table_stmt - - return p -} - -func (s *Common_table_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Common_table_stmtContext) WITH_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWITH_, 0) -} - -func (s *Common_table_stmtContext) AllCommon_table_expression() []ICommon_table_expressionContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(ICommon_table_expressionContext); ok { - len++ - } - } - - tst := make([]ICommon_table_expressionContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(ICommon_table_expressionContext); ok { - tst[i] = t.(ICommon_table_expressionContext) - i++ - } - } - - return tst -} - -func (s *Common_table_stmtContext) Common_table_expression(i int) ICommon_table_expressionContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICommon_table_expressionContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(ICommon_table_expressionContext) -} - -func (s *Common_table_stmtContext) RECURSIVE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRECURSIVE_, 0) -} - -func (s *Common_table_stmtContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Common_table_stmtContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Common_table_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Common_table_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Common_table_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCommon_table_stmt(s) - } -} - -func (s *Common_table_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitCommon_table_stmt(s) - } -} - -func (p *SQLiteParser) Common_table_stmt() (localctx ICommon_table_stmtContext) { - this := p - _ = this - - localctx = NewCommon_table_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 134, SQLiteParserRULE_common_table_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1784) - p.Match(SQLiteParserWITH_) - } - p.SetState(1786) - p.GetErrorHandler().Sync(p) - - if 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 267, p.GetParserRuleContext()) == 1 { - { - p.SetState(1785) - p.Match(SQLiteParserRECURSIVE_) - } - - } - { - p.SetState(1788) - p.Common_table_expression() - } - p.SetState(1793) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1789) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1790) - p.Common_table_expression() - } - - p.SetState(1795) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - return localctx -} - -// IOrder_by_stmtContext is an interface to support dynamic dispatch. -type IOrder_by_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - ORDER_() antlr.TerminalNode - BY_() antlr.TerminalNode - AllOrdering_term() []IOrdering_termContext - Ordering_term(i int) IOrdering_termContext - AllCOMMA() []antlr.TerminalNode - COMMA(i int) antlr.TerminalNode - - // IsOrder_by_stmtContext differentiates from other interfaces. - IsOrder_by_stmtContext() -} - -type Order_by_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyOrder_by_stmtContext() *Order_by_stmtContext { - var p = new(Order_by_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_order_by_stmt - return p -} - -func (*Order_by_stmtContext) IsOrder_by_stmtContext() {} - -func NewOrder_by_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Order_by_stmtContext { - var p = new(Order_by_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_order_by_stmt - - return p -} - -func (s *Order_by_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Order_by_stmtContext) ORDER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserORDER_, 0) -} - -func (s *Order_by_stmtContext) BY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserBY_, 0) -} - -func (s *Order_by_stmtContext) AllOrdering_term() []IOrdering_termContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IOrdering_termContext); ok { - len++ - } - } - - tst := make([]IOrdering_termContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IOrdering_termContext); ok { - tst[i] = t.(IOrdering_termContext) - i++ - } - } - - return tst -} - -func (s *Order_by_stmtContext) Ordering_term(i int) IOrdering_termContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IOrdering_termContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IOrdering_termContext) -} - -func (s *Order_by_stmtContext) AllCOMMA() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCOMMA) -} - -func (s *Order_by_stmtContext) COMMA(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, i) -} - -func (s *Order_by_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Order_by_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Order_by_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterOrder_by_stmt(s) - } -} - -func (s *Order_by_stmtContext) 
ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitOrder_by_stmt(s) - } -} - -func (p *SQLiteParser) Order_by_stmt() (localctx IOrder_by_stmtContext) { - this := p - _ = this - - localctx = NewOrder_by_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 136, SQLiteParserRULE_order_by_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1796) - p.Match(SQLiteParserORDER_) - } - { - p.SetState(1797) - p.Match(SQLiteParserBY_) - } - { - p.SetState(1798) - p.Ordering_term() - } - p.SetState(1803) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - for _la == SQLiteParserCOMMA { - { - p.SetState(1799) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1800) - p.Ordering_term() - } - - p.SetState(1805) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - } - - return localctx -} - -// ILimit_stmtContext is an interface to support dynamic dispatch. -type ILimit_stmtContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - LIMIT_() antlr.TerminalNode - AllExpr() []IExprContext - Expr(i int) IExprContext - OFFSET_() antlr.TerminalNode - COMMA() antlr.TerminalNode - - // IsLimit_stmtContext differentiates from other interfaces. - IsLimit_stmtContext() -} - -type Limit_stmtContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyLimit_stmtContext() *Limit_stmtContext { - var p = new(Limit_stmtContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_limit_stmt - return p -} - -func (*Limit_stmtContext) IsLimit_stmtContext() {} - -func NewLimit_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Limit_stmtContext { - var p = new(Limit_stmtContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_limit_stmt - - return p -} - -func (s *Limit_stmtContext) GetParser() antlr.Parser { return s.parser } - -func (s *Limit_stmtContext) LIMIT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLIMIT_, 0) -} - -func (s *Limit_stmtContext) AllExpr() []IExprContext { - children := s.GetChildren() - len := 0 - for _, ctx := range children { - if _, ok := ctx.(IExprContext); ok { - len++ - } - } - - tst := make([]IExprContext, len) - i := 0 - for _, ctx := range children { - if t, ok := ctx.(IExprContext); ok { - tst[i] = t.(IExprContext) - i++ - } - } - - return tst -} - -func (s *Limit_stmtContext) Expr(i int) IExprContext { - var t antlr.RuleContext - j := 0 - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - if j == i { - t = ctx.(antlr.RuleContext) - break - } - j++ - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Limit_stmtContext) OFFSET_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOFFSET_, 0) -} - -func (s *Limit_stmtContext) COMMA() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, 0) -} - -func (s *Limit_stmtContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Limit_stmtContext) 
ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Limit_stmtContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterLimit_stmt(s) - } -} - -func (s *Limit_stmtContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitLimit_stmt(s) - } -} - -func (p *SQLiteParser) Limit_stmt() (localctx ILimit_stmtContext) { - this := p - _ = this - - localctx = NewLimit_stmtContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 138, SQLiteParserRULE_limit_stmt) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1806) - p.Match(SQLiteParserLIMIT_) - } - { - p.SetState(1807) - p.expr(0) - } - p.SetState(1810) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserCOMMA || _la == SQLiteParserOFFSET_ { - { - p.SetState(1808) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserCOMMA || _la == SQLiteParserOFFSET_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - { - p.SetState(1809) - p.expr(0) - } - - } - - return localctx -} - -// IOrdering_termContext is an interface to support dynamic dispatch. -type IOrdering_termContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Expr() IExprContext - COLLATE_() antlr.TerminalNode - Collation_name() ICollation_nameContext - Asc_desc() IAsc_descContext - NULLS_() antlr.TerminalNode - FIRST_() antlr.TerminalNode - LAST_() antlr.TerminalNode - - // IsOrdering_termContext differentiates from other interfaces. 
- IsOrdering_termContext() -} - -type Ordering_termContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyOrdering_termContext() *Ordering_termContext { - var p = new(Ordering_termContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_ordering_term - return p -} - -func (*Ordering_termContext) IsOrdering_termContext() {} - -func NewOrdering_termContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Ordering_termContext { - var p = new(Ordering_termContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_ordering_term - - return p -} - -func (s *Ordering_termContext) GetParser() antlr.Parser { return s.parser } - -func (s *Ordering_termContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Ordering_termContext) COLLATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOLLATE_, 0) -} - -func (s *Ordering_termContext) Collation_name() ICollation_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ICollation_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ICollation_nameContext) -} - -func (s *Ordering_termContext) Asc_desc() IAsc_descContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAsc_descContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAsc_descContext) -} - -func (s *Ordering_termContext) NULLS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNULLS_, 0) -} - -func (s *Ordering_termContext) FIRST_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFIRST_, 0) -} - -func (s *Ordering_termContext) LAST_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLAST_, 0) -} - -func (s *Ordering_termContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Ordering_termContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Ordering_termContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterOrdering_term(s) - } -} - -func (s *Ordering_termContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitOrdering_term(s) - } -} - -func (p *SQLiteParser) Ordering_term() (localctx IOrdering_termContext) { - this := p - _ = this - - localctx = NewOrdering_termContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 140, SQLiteParserRULE_ordering_term) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1812) - p.expr(0) - } - p.SetState(1815) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserCOLLATE_ { - { - p.SetState(1813) - p.Match(SQLiteParserCOLLATE_) - } - { - 
p.SetState(1814) - p.Collation_name() - } - - } - p.SetState(1818) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserASC_ || _la == SQLiteParserDESC_ { - { - p.SetState(1817) - p.Asc_desc() - } - - } - p.SetState(1822) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserNULLS_ { - { - p.SetState(1820) - p.Match(SQLiteParserNULLS_) - } - { - p.SetState(1821) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserFIRST_ || _la == SQLiteParserLAST_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - } - - return localctx -} - -// IAsc_descContext is an interface to support dynamic dispatch. -type IAsc_descContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - ASC_() antlr.TerminalNode - DESC_() antlr.TerminalNode - - // IsAsc_descContext differentiates from other interfaces. - IsAsc_descContext() -} - -type Asc_descContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyAsc_descContext() *Asc_descContext { - var p = new(Asc_descContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_asc_desc - return p -} - -func (*Asc_descContext) IsAsc_descContext() {} - -func NewAsc_descContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Asc_descContext { - var p = new(Asc_descContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_asc_desc - - return p -} - -func (s *Asc_descContext) GetParser() antlr.Parser { return s.parser } - -func (s *Asc_descContext) ASC_() antlr.TerminalNode { - return s.GetToken(SQLiteParserASC_, 0) -} - -func (s *Asc_descContext) DESC_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDESC_, 0) -} - -func (s *Asc_descContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Asc_descContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Asc_descContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterAsc_desc(s) - } -} - -func (s *Asc_descContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitAsc_desc(s) - } -} - -func (p *SQLiteParser) Asc_desc() (localctx IAsc_descContext) { - this := p - _ = this - - localctx = NewAsc_descContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 142, SQLiteParserRULE_asc_desc) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1824) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserASC_ || _la == SQLiteParserDESC_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - - return localctx -} - -// IFrame_leftContext is an interface to support dynamic dispatch. -type IFrame_leftContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. 
- GetParser() antlr.Parser - - // Getter signatures - Expr() IExprContext - PRECEDING_() antlr.TerminalNode - FOLLOWING_() antlr.TerminalNode - CURRENT_() antlr.TerminalNode - ROW_() antlr.TerminalNode - UNBOUNDED_() antlr.TerminalNode - - // IsFrame_leftContext differentiates from other interfaces. - IsFrame_leftContext() -} - -type Frame_leftContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyFrame_leftContext() *Frame_leftContext { - var p = new(Frame_leftContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_frame_left - return p -} - -func (*Frame_leftContext) IsFrame_leftContext() {} - -func NewFrame_leftContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Frame_leftContext { - var p = new(Frame_leftContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_frame_left - - return p -} - -func (s *Frame_leftContext) GetParser() antlr.Parser { return s.parser } - -func (s *Frame_leftContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Frame_leftContext) PRECEDING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPRECEDING_, 0) -} - -func (s *Frame_leftContext) FOLLOWING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFOLLOWING_, 0) -} - -func (s *Frame_leftContext) CURRENT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCURRENT_, 0) -} - -func (s *Frame_leftContext) ROW_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROW_, 0) -} - -func (s *Frame_leftContext) UNBOUNDED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUNBOUNDED_, 0) -} - -func (s *Frame_leftContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Frame_leftContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Frame_leftContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterFrame_left(s) - } -} - -func (s *Frame_leftContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitFrame_left(s) - } -} - -func (p *SQLiteParser) Frame_left() (localctx IFrame_leftContext) { - this := p - _ = this - - localctx = NewFrame_leftContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 144, SQLiteParserRULE_frame_left) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(1836) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 274, p.GetParserRuleContext()) { - case 1: - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1826) - p.expr(0) - } - { - p.SetState(1827) - p.Match(SQLiteParserPRECEDING_) - } - - case 2: - p.EnterOuterAlt(localctx, 2) - { - p.SetState(1829) - p.expr(0) - } - { - p.SetState(1830) - p.Match(SQLiteParserFOLLOWING_) - } - - case 3: - p.EnterOuterAlt(localctx, 3) - { - p.SetState(1832) - p.Match(SQLiteParserCURRENT_) - } - { 
- p.SetState(1833) - p.Match(SQLiteParserROW_) - } - - case 4: - p.EnterOuterAlt(localctx, 4) - { - p.SetState(1834) - p.Match(SQLiteParserUNBOUNDED_) - } - { - p.SetState(1835) - p.Match(SQLiteParserPRECEDING_) - } - - } - - return localctx -} - -// IFrame_rightContext is an interface to support dynamic dispatch. -type IFrame_rightContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Expr() IExprContext - PRECEDING_() antlr.TerminalNode - FOLLOWING_() antlr.TerminalNode - CURRENT_() antlr.TerminalNode - ROW_() antlr.TerminalNode - UNBOUNDED_() antlr.TerminalNode - - // IsFrame_rightContext differentiates from other interfaces. - IsFrame_rightContext() -} - -type Frame_rightContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyFrame_rightContext() *Frame_rightContext { - var p = new(Frame_rightContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_frame_right - return p -} - -func (*Frame_rightContext) IsFrame_rightContext() {} - -func NewFrame_rightContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Frame_rightContext { - var p = new(Frame_rightContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_frame_right - - return p -} - -func (s *Frame_rightContext) GetParser() antlr.Parser { return s.parser } - -func (s *Frame_rightContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Frame_rightContext) PRECEDING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPRECEDING_, 0) -} - -func (s *Frame_rightContext) FOLLOWING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFOLLOWING_, 0) -} - -func (s *Frame_rightContext) CURRENT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCURRENT_, 0) -} - -func (s *Frame_rightContext) ROW_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROW_, 0) -} - -func (s *Frame_rightContext) UNBOUNDED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUNBOUNDED_, 0) -} - -func (s *Frame_rightContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Frame_rightContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Frame_rightContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterFrame_right(s) - } -} - -func (s *Frame_rightContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitFrame_right(s) - } -} - -func (p *SQLiteParser) Frame_right() (localctx IFrame_rightContext) { - this := p - _ = this - - localctx = NewFrame_rightContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 146, SQLiteParserRULE_frame_right) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(1848) - p.GetErrorHandler().Sync(p) - switch 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 275, p.GetParserRuleContext()) { - case 1: - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1838) - p.expr(0) - } - { - p.SetState(1839) - p.Match(SQLiteParserPRECEDING_) - } - - case 2: - p.EnterOuterAlt(localctx, 2) - { - p.SetState(1841) - p.expr(0) - } - { - p.SetState(1842) - p.Match(SQLiteParserFOLLOWING_) - } - - case 3: - p.EnterOuterAlt(localctx, 3) - { - p.SetState(1844) - p.Match(SQLiteParserCURRENT_) - } - { - p.SetState(1845) - p.Match(SQLiteParserROW_) - } - - case 4: - p.EnterOuterAlt(localctx, 4) - { - p.SetState(1846) - p.Match(SQLiteParserUNBOUNDED_) - } - { - p.SetState(1847) - p.Match(SQLiteParserFOLLOWING_) - } - - } - - return localctx -} - -// IFrame_singleContext is an interface to support dynamic dispatch. -type IFrame_singleContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Expr() IExprContext - PRECEDING_() antlr.TerminalNode - UNBOUNDED_() antlr.TerminalNode - CURRENT_() antlr.TerminalNode - ROW_() antlr.TerminalNode - - // IsFrame_singleContext differentiates from other interfaces. - IsFrame_singleContext() -} - -type Frame_singleContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyFrame_singleContext() *Frame_singleContext { - var p = new(Frame_singleContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_frame_single - return p -} - -func (*Frame_singleContext) IsFrame_singleContext() {} - -func NewFrame_singleContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Frame_singleContext { - var p = new(Frame_singleContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_frame_single - - return p -} - -func (s *Frame_singleContext) GetParser() antlr.Parser { return s.parser } - -func (s *Frame_singleContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Frame_singleContext) PRECEDING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPRECEDING_, 0) -} - -func (s *Frame_singleContext) UNBOUNDED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUNBOUNDED_, 0) -} - -func (s *Frame_singleContext) CURRENT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCURRENT_, 0) -} - -func (s *Frame_singleContext) ROW_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROW_, 0) -} - -func (s *Frame_singleContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Frame_singleContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Frame_singleContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterFrame_single(s) - } -} - -func (s *Frame_singleContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitFrame_single(s) - } -} - -func (p *SQLiteParser) Frame_single() (localctx IFrame_singleContext) { - this := p - _ = this - - localctx = NewFrame_singleContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 148, SQLiteParserRULE_frame_single) - - defer func() { - 
p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(1857) - p.GetErrorHandler().Sync(p) - switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 276, p.GetParserRuleContext()) { - case 1: - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1850) - p.expr(0) - } - { - p.SetState(1851) - p.Match(SQLiteParserPRECEDING_) - } - - case 2: - p.EnterOuterAlt(localctx, 2) - { - p.SetState(1853) - p.Match(SQLiteParserUNBOUNDED_) - } - { - p.SetState(1854) - p.Match(SQLiteParserPRECEDING_) - } - - case 3: - p.EnterOuterAlt(localctx, 3) - { - p.SetState(1855) - p.Match(SQLiteParserCURRENT_) - } - { - p.SetState(1856) - p.Match(SQLiteParserROW_) - } - - } - - return localctx -} - -// IWindow_functionContext is an interface to support dynamic dispatch. -type IWindow_functionContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - AllOPEN_PAR() []antlr.TerminalNode - OPEN_PAR(i int) antlr.TerminalNode - Expr() IExprContext - AllCLOSE_PAR() []antlr.TerminalNode - CLOSE_PAR(i int) antlr.TerminalNode - OVER_() antlr.TerminalNode - Order_by_expr_asc_desc() IOrder_by_expr_asc_descContext - FIRST_VALUE_() antlr.TerminalNode - LAST_VALUE_() antlr.TerminalNode - Partition_by() IPartition_byContext - Frame_clause() IFrame_clauseContext - CUME_DIST_() antlr.TerminalNode - PERCENT_RANK_() antlr.TerminalNode - Order_by_expr() IOrder_by_exprContext - DENSE_RANK_() antlr.TerminalNode - RANK_() antlr.TerminalNode - ROW_NUMBER_() antlr.TerminalNode - LAG_() antlr.TerminalNode - LEAD_() antlr.TerminalNode - Offset() IOffsetContext - Default_value() IDefault_valueContext - NTH_VALUE_() antlr.TerminalNode - COMMA() antlr.TerminalNode - Signed_number() ISigned_numberContext - NTILE_() antlr.TerminalNode - - // IsWindow_functionContext differentiates from other interfaces. 
- IsWindow_functionContext() -} - -type Window_functionContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyWindow_functionContext() *Window_functionContext { - var p = new(Window_functionContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_window_function - return p -} - -func (*Window_functionContext) IsWindow_functionContext() {} - -func NewWindow_functionContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Window_functionContext { - var p = new(Window_functionContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_window_function - - return p -} - -func (s *Window_functionContext) GetParser() antlr.Parser { return s.parser } - -func (s *Window_functionContext) AllOPEN_PAR() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserOPEN_PAR) -} - -func (s *Window_functionContext) OPEN_PAR(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, i) -} - -func (s *Window_functionContext) Expr() IExprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IExprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IExprContext) -} - -func (s *Window_functionContext) AllCLOSE_PAR() []antlr.TerminalNode { - return s.GetTokens(SQLiteParserCLOSE_PAR) -} - -func (s *Window_functionContext) CLOSE_PAR(i int) antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, i) -} - -func (s *Window_functionContext) OVER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOVER_, 0) -} - -func (s *Window_functionContext) Order_by_expr_asc_desc() IOrder_by_expr_asc_descContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IOrder_by_expr_asc_descContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IOrder_by_expr_asc_descContext) -} - -func (s *Window_functionContext) FIRST_VALUE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFIRST_VALUE_, 0) -} - -func (s *Window_functionContext) LAST_VALUE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLAST_VALUE_, 0) -} - -func (s *Window_functionContext) Partition_by() IPartition_byContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IPartition_byContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IPartition_byContext) -} - -func (s *Window_functionContext) Frame_clause() IFrame_clauseContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IFrame_clauseContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IFrame_clauseContext) -} - -func (s *Window_functionContext) CUME_DIST_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCUME_DIST_, 0) -} - -func (s *Window_functionContext) PERCENT_RANK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPERCENT_RANK_, 0) -} - -func (s *Window_functionContext) Order_by_expr() IOrder_by_exprContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IOrder_by_exprContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IOrder_by_exprContext) -} - -func (s *Window_functionContext) DENSE_RANK_() antlr.TerminalNode { 
- return s.GetToken(SQLiteParserDENSE_RANK_, 0) -} - -func (s *Window_functionContext) RANK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRANK_, 0) -} - -func (s *Window_functionContext) ROW_NUMBER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROW_NUMBER_, 0) -} - -func (s *Window_functionContext) LAG_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLAG_, 0) -} - -func (s *Window_functionContext) LEAD_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLEAD_, 0) -} - -func (s *Window_functionContext) Offset() IOffsetContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IOffsetContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IOffsetContext) -} - -func (s *Window_functionContext) Default_value() IDefault_valueContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IDefault_valueContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IDefault_valueContext) -} - -func (s *Window_functionContext) NTH_VALUE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNTH_VALUE_, 0) -} - -func (s *Window_functionContext) COMMA() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMA, 0) -} - -func (s *Window_functionContext) Signed_number() ISigned_numberContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(ISigned_numberContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(ISigned_numberContext) -} - -func (s *Window_functionContext) NTILE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNTILE_, 0) -} - -func (s *Window_functionContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Window_functionContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Window_functionContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterWindow_function(s) - } -} - -func (s *Window_functionContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitWindow_function(s) - } -} - -func (p *SQLiteParser) Window_function() (localctx IWindow_functionContext) { - this := p - _ = this - - localctx = NewWindow_functionContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 150, SQLiteParserRULE_window_function) - var _la int - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(1944) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserFIRST_VALUE_, SQLiteParserLAST_VALUE_: - p.EnterOuterAlt(localctx, 1) - { - p.SetState(1859) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserFIRST_VALUE_ || _la == SQLiteParserLAST_VALUE_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - { - p.SetState(1860) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1861) - p.expr(0) - } - { - p.SetState(1862) - p.Match(SQLiteParserCLOSE_PAR) - } - { - p.SetState(1863) - p.Match(SQLiteParserOVER_) - } - { - 
p.SetState(1864) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1866) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserPARTITION_ { - { - p.SetState(1865) - p.Partition_by() - } - - } - { - p.SetState(1868) - p.Order_by_expr_asc_desc() - } - p.SetState(1870) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if (int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&2251799880794113) != 0 { - { - p.SetState(1869) - p.Frame_clause() - } - - } - { - p.SetState(1872) - p.Match(SQLiteParserCLOSE_PAR) - } - - case SQLiteParserCUME_DIST_, SQLiteParserPERCENT_RANK_: - p.EnterOuterAlt(localctx, 2) - { - p.SetState(1874) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserCUME_DIST_ || _la == SQLiteParserPERCENT_RANK_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - { - p.SetState(1875) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1876) - p.Match(SQLiteParserCLOSE_PAR) - } - { - p.SetState(1877) - p.Match(SQLiteParserOVER_) - } - { - p.SetState(1878) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1880) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserPARTITION_ { - { - p.SetState(1879) - p.Partition_by() - } - - } - p.SetState(1883) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserORDER_ { - { - p.SetState(1882) - p.Order_by_expr() - } - - } - { - p.SetState(1885) - p.Match(SQLiteParserCLOSE_PAR) - } - - case SQLiteParserDENSE_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_: - p.EnterOuterAlt(localctx, 3) - { - p.SetState(1886) - _la = p.GetTokenStream().LA(1) - - if !((int64((_la-160)) & ^0x3f) == 0 && ((int64(1)<<(_la-160))&385) != 0) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - { - p.SetState(1887) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1888) - p.Match(SQLiteParserCLOSE_PAR) - } - { - p.SetState(1889) - p.Match(SQLiteParserOVER_) - } - { - p.SetState(1890) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1892) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserPARTITION_ { - { - p.SetState(1891) - p.Partition_by() - } - - } - { - p.SetState(1894) - p.Order_by_expr_asc_desc() - } - { - p.SetState(1895) - p.Match(SQLiteParserCLOSE_PAR) - } - - case SQLiteParserLAG_, SQLiteParserLEAD_: - p.EnterOuterAlt(localctx, 4) - { - p.SetState(1897) - _la = p.GetTokenStream().LA(1) - - if !(_la == SQLiteParserLAG_ || _la == SQLiteParserLEAD_) { - p.GetErrorHandler().RecoverInline(p) - } else { - p.GetErrorHandler().ReportMatch(p) - p.Consume() - } - } - { - p.SetState(1898) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1899) - p.expr(0) - } - p.SetState(1901) - p.GetErrorHandler().Sync(p) - - if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 282, p.GetParserRuleContext()) == 1 { - { - p.SetState(1900) - p.Offset() - } - - } - p.SetState(1904) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserCOMMA { - { - p.SetState(1903) - p.Default_value() - } - - } - { - p.SetState(1906) - p.Match(SQLiteParserCLOSE_PAR) - } - { - p.SetState(1907) - p.Match(SQLiteParserOVER_) - } - { - p.SetState(1908) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1910) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserPARTITION_ { - { - p.SetState(1909) - p.Partition_by() - } - - } - { - p.SetState(1912) - 
p.Order_by_expr_asc_desc() - } - { - p.SetState(1913) - p.Match(SQLiteParserCLOSE_PAR) - } - - case SQLiteParserNTH_VALUE_: - p.EnterOuterAlt(localctx, 5) - { - p.SetState(1915) - p.Match(SQLiteParserNTH_VALUE_) - } - { - p.SetState(1916) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1917) - p.expr(0) - } - { - p.SetState(1918) - p.Match(SQLiteParserCOMMA) - } - { - p.SetState(1919) - p.Signed_number() - } - { - p.SetState(1920) - p.Match(SQLiteParserCLOSE_PAR) - } - { - p.SetState(1921) - p.Match(SQLiteParserOVER_) - } - { - p.SetState(1922) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1924) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserPARTITION_ { - { - p.SetState(1923) - p.Partition_by() - } - - } - { - p.SetState(1926) - p.Order_by_expr_asc_desc() - } - p.SetState(1928) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if (int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&2251799880794113) != 0 { - { - p.SetState(1927) - p.Frame_clause() - } - - } - { - p.SetState(1930) - p.Match(SQLiteParserCLOSE_PAR) - } - - case SQLiteParserNTILE_: - p.EnterOuterAlt(localctx, 6) - { - p.SetState(1932) - p.Match(SQLiteParserNTILE_) - } - { - p.SetState(1933) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(1934) - p.expr(0) - } - { - p.SetState(1935) - p.Match(SQLiteParserCLOSE_PAR) - } - { - p.SetState(1936) - p.Match(SQLiteParserOVER_) - } - { - p.SetState(1937) - p.Match(SQLiteParserOPEN_PAR) - } - p.SetState(1939) - p.GetErrorHandler().Sync(p) - _la = p.GetTokenStream().LA(1) - - if _la == SQLiteParserPARTITION_ { - { - p.SetState(1938) - p.Partition_by() - } - - } - { - p.SetState(1941) - p.Order_by_expr_asc_desc() - } - { - p.SetState(1942) - p.Match(SQLiteParserCLOSE_PAR) - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - return localctx -} - -// IOffsetContext is an interface to support dynamic dispatch. -type IOffsetContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - COMMA() antlr.TerminalNode - Signed_number() ISigned_numberContext - - // IsOffsetContext differentiates from other interfaces. 
-	IsOffsetContext()
-}
-
-type OffsetContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyOffsetContext() *OffsetContext {
-	var p = new(OffsetContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_offset
-	return p
-}
-
-func (*OffsetContext) IsOffsetContext() {}
-
-func NewOffsetContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OffsetContext {
-	var p = new(OffsetContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_offset
-
-	return p
-}
-
-func (s *OffsetContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *OffsetContext) COMMA() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCOMMA, 0)
-}
-
-func (s *OffsetContext) Signed_number() ISigned_numberContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ISigned_numberContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(ISigned_numberContext)
-}
-
-func (s *OffsetContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *OffsetContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *OffsetContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterOffset(s)
-	}
-}
-
-func (s *OffsetContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitOffset(s)
-	}
-}
-
-func (p *SQLiteParser) Offset() (localctx IOffsetContext) {
-	this := p
-	_ = this
-
-	localctx = NewOffsetContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 152, SQLiteParserRULE_offset)
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1946)
-		p.Match(SQLiteParserCOMMA)
-	}
-	{
-		p.SetState(1947)
-		p.Signed_number()
-	}
-
-	return localctx
-}
-
-// IDefault_valueContext is an interface to support dynamic dispatch.
-type IDefault_valueContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	COMMA() antlr.TerminalNode
-	Signed_number() ISigned_numberContext
-
-	// IsDefault_valueContext differentiates from other interfaces.
-	IsDefault_valueContext()
-}
-
-type Default_valueContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyDefault_valueContext() *Default_valueContext {
-	var p = new(Default_valueContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_default_value
-	return p
-}
-
-func (*Default_valueContext) IsDefault_valueContext() {}
-
-func NewDefault_valueContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Default_valueContext {
-	var p = new(Default_valueContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_default_value
-
-	return p
-}
-
-func (s *Default_valueContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Default_valueContext) COMMA() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCOMMA, 0)
-}
-
-func (s *Default_valueContext) Signed_number() ISigned_numberContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ISigned_numberContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(ISigned_numberContext)
-}
-
-func (s *Default_valueContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Default_valueContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Default_valueContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterDefault_value(s)
-	}
-}
-
-func (s *Default_valueContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitDefault_value(s)
-	}
-}
-
-func (p *SQLiteParser) Default_value() (localctx IDefault_valueContext) {
-	this := p
-	_ = this
-
-	localctx = NewDefault_valueContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 154, SQLiteParserRULE_default_value)
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1949)
-		p.Match(SQLiteParserCOMMA)
-	}
-	{
-		p.SetState(1950)
-		p.Signed_number()
-	}
-
-	return localctx
-}
-
-// IPartition_byContext is an interface to support dynamic dispatch.
-type IPartition_byContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	PARTITION_() antlr.TerminalNode
-	BY_() antlr.TerminalNode
-	AllExpr() []IExprContext
-	Expr(i int) IExprContext
-
-	// IsPartition_byContext differentiates from other interfaces.
-	IsPartition_byContext()
-}
-
-type Partition_byContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyPartition_byContext() *Partition_byContext {
-	var p = new(Partition_byContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_partition_by
-	return p
-}
-
-func (*Partition_byContext) IsPartition_byContext() {}
-
-func NewPartition_byContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Partition_byContext {
-	var p = new(Partition_byContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_partition_by
-
-	return p
-}
-
-func (s *Partition_byContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Partition_byContext) PARTITION_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserPARTITION_, 0)
-}
-
-func (s *Partition_byContext) BY_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserBY_, 0)
-}
-
-func (s *Partition_byContext) AllExpr() []IExprContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IExprContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IExprContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IExprContext); ok {
-			tst[i] = t.(IExprContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Partition_byContext) Expr(i int) IExprContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IExprContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IExprContext)
-}
-
-func (s *Partition_byContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Partition_byContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Partition_byContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterPartition_by(s)
-	}
-}
-
-func (s *Partition_byContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitPartition_by(s)
-	}
-}
-
-func (p *SQLiteParser) Partition_by() (localctx IPartition_byContext) {
-	this := p
-	_ = this
-
-	localctx = NewPartition_byContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 156, SQLiteParserRULE_partition_by)
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	var _alt int
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1952)
-		p.Match(SQLiteParserPARTITION_)
-	}
-	{
-		p.SetState(1953)
-		p.Match(SQLiteParserBY_)
-	}
-	p.SetState(1955)
-	p.GetErrorHandler().Sync(p)
-	_alt = 1
-	for ok := true; ok; ok = _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
-		switch _alt {
-		case 1:
-			{
-				p.SetState(1954)
-				p.expr(0)
-			}
-
-		default:
-			panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
-		}
-
-		p.SetState(1957)
-		p.GetErrorHandler().Sync(p)
-		_alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 289, p.GetParserRuleContext())
-	}
-
-	return localctx
-}
-
-// IOrder_by_exprContext is an interface to support dynamic dispatch.
-type IOrder_by_exprContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	ORDER_() antlr.TerminalNode
-	BY_() antlr.TerminalNode
-	AllExpr() []IExprContext
-	Expr(i int) IExprContext
-
-	// IsOrder_by_exprContext differentiates from other interfaces.
-	IsOrder_by_exprContext()
-}
-
-type Order_by_exprContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyOrder_by_exprContext() *Order_by_exprContext {
-	var p = new(Order_by_exprContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_order_by_expr
-	return p
-}
-
-func (*Order_by_exprContext) IsOrder_by_exprContext() {}
-
-func NewOrder_by_exprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Order_by_exprContext {
-	var p = new(Order_by_exprContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_order_by_expr
-
-	return p
-}
-
-func (s *Order_by_exprContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Order_by_exprContext) ORDER_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserORDER_, 0)
-}
-
-func (s *Order_by_exprContext) BY_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserBY_, 0)
-}
-
-func (s *Order_by_exprContext) AllExpr() []IExprContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IExprContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IExprContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IExprContext); ok {
-			tst[i] = t.(IExprContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Order_by_exprContext) Expr(i int) IExprContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IExprContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IExprContext)
-}
-
-func (s *Order_by_exprContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Order_by_exprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Order_by_exprContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterOrder_by_expr(s)
-	}
-}
-
-func (s *Order_by_exprContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitOrder_by_expr(s)
-	}
-}
-
-func (p *SQLiteParser) Order_by_expr() (localctx IOrder_by_exprContext) {
-	this := p
-	_ = this
-
-	localctx = NewOrder_by_exprContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 158, SQLiteParserRULE_order_by_expr)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1959)
-		p.Match(SQLiteParserORDER_)
-	}
-	{
-		p.SetState(1960)
-		p.Match(SQLiteParserBY_)
-	}
-	p.SetState(1962)
-	p.GetErrorHandler().Sync(p)
-	_la = p.GetTokenStream().LA(1)
-
-	for ok := true; ok; ok = ((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&-33552632) != 0) || ((int64((_la-64)) & ^0x3f) == 0 && ((int64(1)<<(_la-64))&-1152921504606846977) != 0) || ((int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&4476578029606273023) != 0) {
-		{
-			p.SetState(1961)
-			p.expr(0)
-		}
-
-		p.SetState(1964)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-	}
-
-	return localctx
-}
-
-// IOrder_by_expr_asc_descContext is an interface to support dynamic dispatch.
-type IOrder_by_expr_asc_descContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	ORDER_() antlr.TerminalNode
-	BY_() antlr.TerminalNode
-	Expr_asc_desc() IExpr_asc_descContext
-
-	// IsOrder_by_expr_asc_descContext differentiates from other interfaces.
-	IsOrder_by_expr_asc_descContext()
-}
-
-type Order_by_expr_asc_descContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyOrder_by_expr_asc_descContext() *Order_by_expr_asc_descContext {
-	var p = new(Order_by_expr_asc_descContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_order_by_expr_asc_desc
-	return p
-}
-
-func (*Order_by_expr_asc_descContext) IsOrder_by_expr_asc_descContext() {}
-
-func NewOrder_by_expr_asc_descContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Order_by_expr_asc_descContext {
-	var p = new(Order_by_expr_asc_descContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_order_by_expr_asc_desc
-
-	return p
-}
-
-func (s *Order_by_expr_asc_descContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Order_by_expr_asc_descContext) ORDER_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserORDER_, 0)
-}
-
-func (s *Order_by_expr_asc_descContext) BY_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserBY_, 0)
-}
-
-func (s *Order_by_expr_asc_descContext) Expr_asc_desc() IExpr_asc_descContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IExpr_asc_descContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IExpr_asc_descContext)
-}
-
-func (s *Order_by_expr_asc_descContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Order_by_expr_asc_descContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Order_by_expr_asc_descContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterOrder_by_expr_asc_desc(s)
-	}
-}
-
-func (s *Order_by_expr_asc_descContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitOrder_by_expr_asc_desc(s)
-	}
-}
-
-func (p *SQLiteParser) Order_by_expr_asc_desc() (localctx IOrder_by_expr_asc_descContext) {
-	this := p
-	_ = this
-
-	localctx = NewOrder_by_expr_asc_descContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 160, SQLiteParserRULE_order_by_expr_asc_desc)
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1966)
-		p.Match(SQLiteParserORDER_)
-	}
-	{
-		p.SetState(1967)
-		p.Match(SQLiteParserBY_)
-	}
-	{
-		p.SetState(1968)
-		p.Expr_asc_desc()
-	}
-
-	return localctx
-}
-
-// IExpr_asc_descContext is an interface to support dynamic dispatch.
-type IExpr_asc_descContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	AllExpr() []IExprContext
-	Expr(i int) IExprContext
-	AllAsc_desc() []IAsc_descContext
-	Asc_desc(i int) IAsc_descContext
-	AllCOMMA() []antlr.TerminalNode
-	COMMA(i int) antlr.TerminalNode
-
-	// IsExpr_asc_descContext differentiates from other interfaces.
-	IsExpr_asc_descContext()
-}
-
-type Expr_asc_descContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyExpr_asc_descContext() *Expr_asc_descContext {
-	var p = new(Expr_asc_descContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_expr_asc_desc
-	return p
-}
-
-func (*Expr_asc_descContext) IsExpr_asc_descContext() {}
-
-func NewExpr_asc_descContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Expr_asc_descContext {
-	var p = new(Expr_asc_descContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_expr_asc_desc
-
-	return p
-}
-
-func (s *Expr_asc_descContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Expr_asc_descContext) AllExpr() []IExprContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IExprContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IExprContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IExprContext); ok {
-			tst[i] = t.(IExprContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Expr_asc_descContext) Expr(i int) IExprContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IExprContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IExprContext)
-}
-
-func (s *Expr_asc_descContext) AllAsc_desc() []IAsc_descContext {
-	children := s.GetChildren()
-	len := 0
-	for _, ctx := range children {
-		if _, ok := ctx.(IAsc_descContext); ok {
-			len++
-		}
-	}
-
-	tst := make([]IAsc_descContext, len)
-	i := 0
-	for _, ctx := range children {
-		if t, ok := ctx.(IAsc_descContext); ok {
-			tst[i] = t.(IAsc_descContext)
-			i++
-		}
-	}
-
-	return tst
-}
-
-func (s *Expr_asc_descContext) Asc_desc(i int) IAsc_descContext {
-	var t antlr.RuleContext
-	j := 0
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IAsc_descContext); ok {
-			if j == i {
-				t = ctx.(antlr.RuleContext)
-				break
-			}
-			j++
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IAsc_descContext)
-}
-
-func (s *Expr_asc_descContext) AllCOMMA() []antlr.TerminalNode {
-	return s.GetTokens(SQLiteParserCOMMA)
-}
-
-func (s *Expr_asc_descContext) COMMA(i int) antlr.TerminalNode {
-	return s.GetToken(SQLiteParserCOMMA, i)
-}
-
-func (s *Expr_asc_descContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Expr_asc_descContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Expr_asc_descContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterExpr_asc_desc(s)
-	}
-}
-
-func (s *Expr_asc_descContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitExpr_asc_desc(s)
-	}
-}
-
-func (p *SQLiteParser) Expr_asc_desc() (localctx IExpr_asc_descContext) {
-	this := p
-	_ = this
-
-	localctx = NewExpr_asc_descContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 162, SQLiteParserRULE_expr_asc_desc)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1970)
-		p.expr(0)
-	}
-	p.SetState(1972)
-	p.GetErrorHandler().Sync(p)
-	_la = p.GetTokenStream().LA(1)
-
-	if _la == SQLiteParserASC_ || _la == SQLiteParserDESC_ {
-		{
-			p.SetState(1971)
-			p.Asc_desc()
-		}
-
-	}
-	p.SetState(1981)
-	p.GetErrorHandler().Sync(p)
-	_la = p.GetTokenStream().LA(1)
-
-	for _la == SQLiteParserCOMMA {
-		{
-			p.SetState(1974)
-			p.Match(SQLiteParserCOMMA)
-		}
-		{
-			p.SetState(1975)
-			p.expr(0)
-		}
-		p.SetState(1977)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-
-		if _la == SQLiteParserASC_ || _la == SQLiteParserDESC_ {
-			{
-				p.SetState(1976)
-				p.Asc_desc()
-			}
-
-		}
-
-		p.SetState(1983)
-		p.GetErrorHandler().Sync(p)
-		_la = p.GetTokenStream().LA(1)
-	}
-
-	return localctx
-}
-
-// IInitial_selectContext is an interface to support dynamic dispatch.
-type IInitial_selectContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	Select_stmt() ISelect_stmtContext
-
-	// IsInitial_selectContext differentiates from other interfaces.
-	IsInitial_selectContext()
-}
-
-type Initial_selectContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyInitial_selectContext() *Initial_selectContext {
-	var p = new(Initial_selectContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_initial_select
-	return p
-}
-
-func (*Initial_selectContext) IsInitial_selectContext() {}
-
-func NewInitial_selectContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Initial_selectContext {
-	var p = new(Initial_selectContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_initial_select
-
-	return p
-}
-
-func (s *Initial_selectContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Initial_selectContext) Select_stmt() ISelect_stmtContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ISelect_stmtContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(ISelect_stmtContext)
-}
-
-func (s *Initial_selectContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Initial_selectContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Initial_selectContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterInitial_select(s)
-	}
-}
-
-func (s *Initial_selectContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitInitial_select(s)
-	}
-}
-
-func (p *SQLiteParser) Initial_select() (localctx IInitial_selectContext) {
-	this := p
-	_ = this
-
-	localctx = NewInitial_selectContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 164, SQLiteParserRULE_initial_select)
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1984)
-		p.Select_stmt()
-	}
-
-	return localctx
-}
-
-// IRecursive_selectContext is an interface to support dynamic dispatch.
-type IRecursive_selectContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	Select_stmt() ISelect_stmtContext
-
-	// IsRecursive_selectContext differentiates from other interfaces.
-	IsRecursive_selectContext()
-}
-
-type Recursive_selectContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyRecursive_selectContext() *Recursive_selectContext {
-	var p = new(Recursive_selectContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_recursive_select
-	return p
-}
-
-func (*Recursive_selectContext) IsRecursive_selectContext() {}
-
-func NewRecursive_selectContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Recursive_selectContext {
-	var p = new(Recursive_selectContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_recursive_select
-
-	return p
-}
-
-func (s *Recursive_selectContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Recursive_selectContext) Select_stmt() ISelect_stmtContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(ISelect_stmtContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(ISelect_stmtContext)
-}
-
-func (s *Recursive_selectContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Recursive_selectContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Recursive_selectContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterRecursive_select(s)
-	}
-}
-
-func (s *Recursive_selectContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitRecursive_select(s)
-	}
-}
-
-func (p *SQLiteParser) Recursive_select() (localctx IRecursive_selectContext) {
-	this := p
-	_ = this
-
-	localctx = NewRecursive_selectContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 166, SQLiteParserRULE_recursive_select)
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1986)
-		p.Select_stmt()
-	}
-
-	return localctx
-}
-
-// IUnary_operatorContext is an interface to support dynamic dispatch.
-type IUnary_operatorContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	MINUS() antlr.TerminalNode
-	PLUS() antlr.TerminalNode
-	TILDE() antlr.TerminalNode
-	NOT_() antlr.TerminalNode
-
-	// IsUnary_operatorContext differentiates from other interfaces.
-	IsUnary_operatorContext()
-}
-
-type Unary_operatorContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyUnary_operatorContext() *Unary_operatorContext {
-	var p = new(Unary_operatorContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_unary_operator
-	return p
-}
-
-func (*Unary_operatorContext) IsUnary_operatorContext() {}
-
-func NewUnary_operatorContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Unary_operatorContext {
-	var p = new(Unary_operatorContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_unary_operator
-
-	return p
-}
-
-func (s *Unary_operatorContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Unary_operatorContext) MINUS() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserMINUS, 0)
-}
-
-func (s *Unary_operatorContext) PLUS() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserPLUS, 0)
-}
-
-func (s *Unary_operatorContext) TILDE() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserTILDE, 0)
-}
-
-func (s *Unary_operatorContext) NOT_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserNOT_, 0)
-}
-
-func (s *Unary_operatorContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Unary_operatorContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Unary_operatorContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterUnary_operator(s)
-	}
-}
-
-func (s *Unary_operatorContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitUnary_operator(s)
-	}
-}
-
-func (p *SQLiteParser) Unary_operator() (localctx IUnary_operatorContext) {
-	this := p
-	_ = this
-
-	localctx = NewUnary_operatorContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 168, SQLiteParserRULE_unary_operator)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1988)
-		_la = p.GetTokenStream().LA(1)
-
-		if !(((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&1792) != 0) || _la == SQLiteParserNOT_) {
-			p.GetErrorHandler().RecoverInline(p)
-		} else {
-			p.GetErrorHandler().ReportMatch(p)
-			p.Consume()
-		}
-	}
-
-	return localctx
-}
-
-// IError_messageContext is an interface to support dynamic dispatch.
-type IError_messageContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	STRING_LITERAL() antlr.TerminalNode
-
-	// IsError_messageContext differentiates from other interfaces.
-	IsError_messageContext()
-}
-
-type Error_messageContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyError_messageContext() *Error_messageContext {
-	var p = new(Error_messageContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_error_message
-	return p
-}
-
-func (*Error_messageContext) IsError_messageContext() {}
-
-func NewError_messageContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Error_messageContext {
-	var p = new(Error_messageContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_error_message
-
-	return p
-}
-
-func (s *Error_messageContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Error_messageContext) STRING_LITERAL() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserSTRING_LITERAL, 0)
-}
-
-func (s *Error_messageContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Error_messageContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Error_messageContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterError_message(s)
-	}
-}
-
-func (s *Error_messageContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitError_message(s)
-	}
-}
-
-func (p *SQLiteParser) Error_message() (localctx IError_messageContext) {
-	this := p
-	_ = this
-
-	localctx = NewError_messageContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 170, SQLiteParserRULE_error_message)
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1990)
-		p.Match(SQLiteParserSTRING_LITERAL)
-	}
-
-	return localctx
-}
-
-// IModule_argumentContext is an interface to support dynamic dispatch.
-type IModule_argumentContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	Expr() IExprContext
-	Column_def() IColumn_defContext
-
-	// IsModule_argumentContext differentiates from other interfaces.
-	IsModule_argumentContext()
-}
-
-type Module_argumentContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyModule_argumentContext() *Module_argumentContext {
-	var p = new(Module_argumentContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_module_argument
-	return p
-}
-
-func (*Module_argumentContext) IsModule_argumentContext() {}
-
-func NewModule_argumentContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Module_argumentContext {
-	var p = new(Module_argumentContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_module_argument
-
-	return p
-}
-
-func (s *Module_argumentContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Module_argumentContext) Expr() IExprContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IExprContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IExprContext)
-}
-
-func (s *Module_argumentContext) Column_def() IColumn_defContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IColumn_defContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IColumn_defContext)
-}
-
-func (s *Module_argumentContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Module_argumentContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Module_argumentContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterModule_argument(s)
-	}
-}
-
-func (s *Module_argumentContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitModule_argument(s)
-	}
-}
-
-func (p *SQLiteParser) Module_argument() (localctx IModule_argumentContext) {
-	this := p
-	_ = this
-
-	localctx = NewModule_argumentContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 172, SQLiteParserRULE_module_argument)
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.SetState(1994)
-	p.GetErrorHandler().Sync(p)
-	switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 294, p.GetParserRuleContext()) {
-	case 1:
-		p.EnterOuterAlt(localctx, 1)
-		{
-			p.SetState(1992)
-			p.expr(0)
-		}
-
-	case 2:
-		p.EnterOuterAlt(localctx, 2)
-		{
-			p.SetState(1993)
-			p.Column_def()
-		}
-
-	}
-
-	return localctx
-}
-
-// IColumn_aliasContext is an interface to support dynamic dispatch.
-type IColumn_aliasContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	IDENTIFIER() antlr.TerminalNode
-	STRING_LITERAL() antlr.TerminalNode
-
-	// IsColumn_aliasContext differentiates from other interfaces.
-	IsColumn_aliasContext()
-}
-
-type Column_aliasContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyColumn_aliasContext() *Column_aliasContext {
-	var p = new(Column_aliasContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_column_alias
-	return p
-}
-
-func (*Column_aliasContext) IsColumn_aliasContext() {}
-
-func NewColumn_aliasContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Column_aliasContext {
-	var p = new(Column_aliasContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_column_alias
-
-	return p
-}
-
-func (s *Column_aliasContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *Column_aliasContext) IDENTIFIER() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserIDENTIFIER, 0)
-}
-
-func (s *Column_aliasContext) STRING_LITERAL() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserSTRING_LITERAL, 0)
-}
-
-func (s *Column_aliasContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *Column_aliasContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *Column_aliasContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterColumn_alias(s)
-	}
-}
-
-func (s *Column_aliasContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitColumn_alias(s)
-	}
-}
-
-func (p *SQLiteParser) Column_alias() (localctx IColumn_aliasContext) {
-	this := p
-	_ = this
-
-	localctx = NewColumn_aliasContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 174, SQLiteParserRULE_column_alias)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1996)
-		_la = p.GetTokenStream().LA(1)
-
-		if !(_la == SQLiteParserIDENTIFIER || _la == SQLiteParserSTRING_LITERAL) {
-			p.GetErrorHandler().RecoverInline(p)
-		} else {
-			p.GetErrorHandler().ReportMatch(p)
-			p.Consume()
-		}
-	}
-
-	return localctx
-}
-
-// IKeywordContext is an interface to support dynamic dispatch.
-type IKeywordContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
- GetParser() antlr.Parser - - // Getter signatures - ABORT_() antlr.TerminalNode - ACTION_() antlr.TerminalNode - ADD_() antlr.TerminalNode - AFTER_() antlr.TerminalNode - ALL_() antlr.TerminalNode - ALTER_() antlr.TerminalNode - ANALYZE_() antlr.TerminalNode - AND_() antlr.TerminalNode - AS_() antlr.TerminalNode - ASC_() antlr.TerminalNode - ATTACH_() antlr.TerminalNode - AUTOINCREMENT_() antlr.TerminalNode - BEFORE_() antlr.TerminalNode - BEGIN_() antlr.TerminalNode - BETWEEN_() antlr.TerminalNode - BY_() antlr.TerminalNode - CASCADE_() antlr.TerminalNode - CASE_() antlr.TerminalNode - CAST_() antlr.TerminalNode - CHECK_() antlr.TerminalNode - COLLATE_() antlr.TerminalNode - COLUMN_() antlr.TerminalNode - COMMIT_() antlr.TerminalNode - CONFLICT_() antlr.TerminalNode - CONSTRAINT_() antlr.TerminalNode - CREATE_() antlr.TerminalNode - CROSS_() antlr.TerminalNode - CURRENT_DATE_() antlr.TerminalNode - CURRENT_TIME_() antlr.TerminalNode - CURRENT_TIMESTAMP_() antlr.TerminalNode - DATABASE_() antlr.TerminalNode - DEFAULT_() antlr.TerminalNode - DEFERRABLE_() antlr.TerminalNode - DEFERRED_() antlr.TerminalNode - DELETE_() antlr.TerminalNode - DESC_() antlr.TerminalNode - DETACH_() antlr.TerminalNode - DISTINCT_() antlr.TerminalNode - DROP_() antlr.TerminalNode - EACH_() antlr.TerminalNode - ELSE_() antlr.TerminalNode - END_() antlr.TerminalNode - ESCAPE_() antlr.TerminalNode - EXCEPT_() antlr.TerminalNode - EXCLUSIVE_() antlr.TerminalNode - EXISTS_() antlr.TerminalNode - EXPLAIN_() antlr.TerminalNode - FAIL_() antlr.TerminalNode - FOR_() antlr.TerminalNode - FOREIGN_() antlr.TerminalNode - FROM_() antlr.TerminalNode - FULL_() antlr.TerminalNode - GLOB_() antlr.TerminalNode - GROUP_() antlr.TerminalNode - HAVING_() antlr.TerminalNode - IF_() antlr.TerminalNode - IGNORE_() antlr.TerminalNode - IMMEDIATE_() antlr.TerminalNode - IN_() antlr.TerminalNode - INDEX_() antlr.TerminalNode - INDEXED_() antlr.TerminalNode - INITIALLY_() antlr.TerminalNode - INNER_() antlr.TerminalNode - INSERT_() antlr.TerminalNode - INSTEAD_() antlr.TerminalNode - INTERSECT_() antlr.TerminalNode - INTO_() antlr.TerminalNode - IS_() antlr.TerminalNode - ISNULL_() antlr.TerminalNode - JOIN_() antlr.TerminalNode - KEY_() antlr.TerminalNode - LEFT_() antlr.TerminalNode - LIKE_() antlr.TerminalNode - LIMIT_() antlr.TerminalNode - MATCH_() antlr.TerminalNode - NATURAL_() antlr.TerminalNode - NO_() antlr.TerminalNode - NOT_() antlr.TerminalNode - NOTNULL_() antlr.TerminalNode - NULL_() antlr.TerminalNode - OF_() antlr.TerminalNode - OFFSET_() antlr.TerminalNode - ON_() antlr.TerminalNode - OR_() antlr.TerminalNode - ORDER_() antlr.TerminalNode - OUTER_() antlr.TerminalNode - PLAN_() antlr.TerminalNode - PRAGMA_() antlr.TerminalNode - PRIMARY_() antlr.TerminalNode - QUERY_() antlr.TerminalNode - RAISE_() antlr.TerminalNode - RECURSIVE_() antlr.TerminalNode - REFERENCES_() antlr.TerminalNode - REGEXP_() antlr.TerminalNode - REINDEX_() antlr.TerminalNode - RELEASE_() antlr.TerminalNode - RENAME_() antlr.TerminalNode - REPLACE_() antlr.TerminalNode - RESTRICT_() antlr.TerminalNode - RIGHT_() antlr.TerminalNode - ROLLBACK_() antlr.TerminalNode - ROW_() antlr.TerminalNode - ROWS_() antlr.TerminalNode - SAVEPOINT_() antlr.TerminalNode - SELECT_() antlr.TerminalNode - SET_() antlr.TerminalNode - TABLE_() antlr.TerminalNode - TEMP_() antlr.TerminalNode - TEMPORARY_() antlr.TerminalNode - THEN_() antlr.TerminalNode - TO_() antlr.TerminalNode - TRANSACTION_() antlr.TerminalNode - TRIGGER_() antlr.TerminalNode - UNION_() 
antlr.TerminalNode - UNIQUE_() antlr.TerminalNode - UPDATE_() antlr.TerminalNode - USING_() antlr.TerminalNode - VACUUM_() antlr.TerminalNode - VALUES_() antlr.TerminalNode - VIEW_() antlr.TerminalNode - VIRTUAL_() antlr.TerminalNode - WHEN_() antlr.TerminalNode - WHERE_() antlr.TerminalNode - WITH_() antlr.TerminalNode - WITHOUT_() antlr.TerminalNode - FIRST_VALUE_() antlr.TerminalNode - OVER_() antlr.TerminalNode - PARTITION_() antlr.TerminalNode - RANGE_() antlr.TerminalNode - PRECEDING_() antlr.TerminalNode - UNBOUNDED_() antlr.TerminalNode - CURRENT_() antlr.TerminalNode - FOLLOWING_() antlr.TerminalNode - CUME_DIST_() antlr.TerminalNode - DENSE_RANK_() antlr.TerminalNode - LAG_() antlr.TerminalNode - LAST_VALUE_() antlr.TerminalNode - LEAD_() antlr.TerminalNode - NTH_VALUE_() antlr.TerminalNode - NTILE_() antlr.TerminalNode - PERCENT_RANK_() antlr.TerminalNode - RANK_() antlr.TerminalNode - ROW_NUMBER_() antlr.TerminalNode - GENERATED_() antlr.TerminalNode - ALWAYS_() antlr.TerminalNode - STORED_() antlr.TerminalNode - TRUE_() antlr.TerminalNode - FALSE_() antlr.TerminalNode - WINDOW_() antlr.TerminalNode - NULLS_() antlr.TerminalNode - FIRST_() antlr.TerminalNode - LAST_() antlr.TerminalNode - FILTER_() antlr.TerminalNode - GROUPS_() antlr.TerminalNode - EXCLUDE_() antlr.TerminalNode - - // IsKeywordContext differentiates from other interfaces. - IsKeywordContext() -} - -type KeywordContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyKeywordContext() *KeywordContext { - var p = new(KeywordContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_keyword - return p -} - -func (*KeywordContext) IsKeywordContext() {} - -func NewKeywordContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *KeywordContext { - var p = new(KeywordContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_keyword - - return p -} - -func (s *KeywordContext) GetParser() antlr.Parser { return s.parser } - -func (s *KeywordContext) ABORT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserABORT_, 0) -} - -func (s *KeywordContext) ACTION_() antlr.TerminalNode { - return s.GetToken(SQLiteParserACTION_, 0) -} - -func (s *KeywordContext) ADD_() antlr.TerminalNode { - return s.GetToken(SQLiteParserADD_, 0) -} - -func (s *KeywordContext) AFTER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAFTER_, 0) -} - -func (s *KeywordContext) ALL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserALL_, 0) -} - -func (s *KeywordContext) ALTER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserALTER_, 0) -} - -func (s *KeywordContext) ANALYZE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserANALYZE_, 0) -} - -func (s *KeywordContext) AND_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAND_, 0) -} - -func (s *KeywordContext) AS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAS_, 0) -} - -func (s *KeywordContext) ASC_() antlr.TerminalNode { - return s.GetToken(SQLiteParserASC_, 0) -} - -func (s *KeywordContext) ATTACH_() antlr.TerminalNode { - return s.GetToken(SQLiteParserATTACH_, 0) -} - -func (s *KeywordContext) AUTOINCREMENT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserAUTOINCREMENT_, 0) -} - -func (s *KeywordContext) BEFORE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserBEFORE_, 0) -} - -func (s *KeywordContext) BEGIN_() antlr.TerminalNode { - return 
s.GetToken(SQLiteParserBEGIN_, 0) -} - -func (s *KeywordContext) BETWEEN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserBETWEEN_, 0) -} - -func (s *KeywordContext) BY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserBY_, 0) -} - -func (s *KeywordContext) CASCADE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCASCADE_, 0) -} - -func (s *KeywordContext) CASE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCASE_, 0) -} - -func (s *KeywordContext) CAST_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCAST_, 0) -} - -func (s *KeywordContext) CHECK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCHECK_, 0) -} - -func (s *KeywordContext) COLLATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOLLATE_, 0) -} - -func (s *KeywordContext) COLUMN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOLUMN_, 0) -} - -func (s *KeywordContext) COMMIT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCOMMIT_, 0) -} - -func (s *KeywordContext) CONFLICT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCONFLICT_, 0) -} - -func (s *KeywordContext) CONSTRAINT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCONSTRAINT_, 0) -} - -func (s *KeywordContext) CREATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCREATE_, 0) -} - -func (s *KeywordContext) CROSS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCROSS_, 0) -} - -func (s *KeywordContext) CURRENT_DATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCURRENT_DATE_, 0) -} - -func (s *KeywordContext) CURRENT_TIME_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCURRENT_TIME_, 0) -} - -func (s *KeywordContext) CURRENT_TIMESTAMP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCURRENT_TIMESTAMP_, 0) -} - -func (s *KeywordContext) DATABASE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDATABASE_, 0) -} - -func (s *KeywordContext) DEFAULT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDEFAULT_, 0) -} - -func (s *KeywordContext) DEFERRABLE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDEFERRABLE_, 0) -} - -func (s *KeywordContext) DEFERRED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDEFERRED_, 0) -} - -func (s *KeywordContext) DELETE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDELETE_, 0) -} - -func (s *KeywordContext) DESC_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDESC_, 0) -} - -func (s *KeywordContext) DETACH_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDETACH_, 0) -} - -func (s *KeywordContext) DISTINCT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDISTINCT_, 0) -} - -func (s *KeywordContext) DROP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDROP_, 0) -} - -func (s *KeywordContext) EACH_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEACH_, 0) -} - -func (s *KeywordContext) ELSE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserELSE_, 0) -} - -func (s *KeywordContext) END_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEND_, 0) -} - -func (s *KeywordContext) ESCAPE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserESCAPE_, 0) -} - -func (s *KeywordContext) EXCEPT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXCEPT_, 0) -} - -func (s *KeywordContext) EXCLUSIVE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXCLUSIVE_, 0) -} - -func (s *KeywordContext) EXISTS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserEXISTS_, 0) -} - -func (s *KeywordContext) EXPLAIN_() antlr.TerminalNode { - return 
s.GetToken(SQLiteParserEXPLAIN_, 0) -} - -func (s *KeywordContext) FAIL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFAIL_, 0) -} - -func (s *KeywordContext) FOR_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFOR_, 0) -} - -func (s *KeywordContext) FOREIGN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFOREIGN_, 0) -} - -func (s *KeywordContext) FROM_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFROM_, 0) -} - -func (s *KeywordContext) FULL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFULL_, 0) -} - -func (s *KeywordContext) GLOB_() antlr.TerminalNode { - return s.GetToken(SQLiteParserGLOB_, 0) -} - -func (s *KeywordContext) GROUP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserGROUP_, 0) -} - -func (s *KeywordContext) HAVING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserHAVING_, 0) -} - -func (s *KeywordContext) IF_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIF_, 0) -} - -func (s *KeywordContext) IGNORE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIGNORE_, 0) -} - -func (s *KeywordContext) IMMEDIATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIMMEDIATE_, 0) -} - -func (s *KeywordContext) IN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIN_, 0) -} - -func (s *KeywordContext) INDEX_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINDEX_, 0) -} - -func (s *KeywordContext) INDEXED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINDEXED_, 0) -} - -func (s *KeywordContext) INITIALLY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINITIALLY_, 0) -} - -func (s *KeywordContext) INNER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINNER_, 0) -} - -func (s *KeywordContext) INSERT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINSERT_, 0) -} - -func (s *KeywordContext) INSTEAD_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINSTEAD_, 0) -} - -func (s *KeywordContext) INTERSECT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINTERSECT_, 0) -} - -func (s *KeywordContext) INTO_() antlr.TerminalNode { - return s.GetToken(SQLiteParserINTO_, 0) -} - -func (s *KeywordContext) IS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserIS_, 0) -} - -func (s *KeywordContext) ISNULL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserISNULL_, 0) -} - -func (s *KeywordContext) JOIN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserJOIN_, 0) -} - -func (s *KeywordContext) KEY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserKEY_, 0) -} - -func (s *KeywordContext) LEFT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLEFT_, 0) -} - -func (s *KeywordContext) LIKE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLIKE_, 0) -} - -func (s *KeywordContext) LIMIT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLIMIT_, 0) -} - -func (s *KeywordContext) MATCH_() antlr.TerminalNode { - return s.GetToken(SQLiteParserMATCH_, 0) -} - -func (s *KeywordContext) NATURAL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNATURAL_, 0) -} - -func (s *KeywordContext) NO_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNO_, 0) -} - -func (s *KeywordContext) NOT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNOT_, 0) -} - -func (s *KeywordContext) NOTNULL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNOTNULL_, 0) -} - -func (s *KeywordContext) NULL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNULL_, 0) -} - -func (s *KeywordContext) OF_() antlr.TerminalNode { - return 
s.GetToken(SQLiteParserOF_, 0) -} - -func (s *KeywordContext) OFFSET_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOFFSET_, 0) -} - -func (s *KeywordContext) ON_() antlr.TerminalNode { - return s.GetToken(SQLiteParserON_, 0) -} - -func (s *KeywordContext) OR_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOR_, 0) -} - -func (s *KeywordContext) ORDER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserORDER_, 0) -} - -func (s *KeywordContext) OUTER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOUTER_, 0) -} - -func (s *KeywordContext) PLAN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPLAN_, 0) -} - -func (s *KeywordContext) PRAGMA_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPRAGMA_, 0) -} - -func (s *KeywordContext) PRIMARY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPRIMARY_, 0) -} - -func (s *KeywordContext) QUERY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserQUERY_, 0) -} - -func (s *KeywordContext) RAISE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRAISE_, 0) -} - -func (s *KeywordContext) RECURSIVE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRECURSIVE_, 0) -} - -func (s *KeywordContext) REFERENCES_() antlr.TerminalNode { - return s.GetToken(SQLiteParserREFERENCES_, 0) -} - -func (s *KeywordContext) REGEXP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserREGEXP_, 0) -} - -func (s *KeywordContext) REINDEX_() antlr.TerminalNode { - return s.GetToken(SQLiteParserREINDEX_, 0) -} - -func (s *KeywordContext) RELEASE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRELEASE_, 0) -} - -func (s *KeywordContext) RENAME_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRENAME_, 0) -} - -func (s *KeywordContext) REPLACE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserREPLACE_, 0) -} - -func (s *KeywordContext) RESTRICT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRESTRICT_, 0) -} - -func (s *KeywordContext) RIGHT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRIGHT_, 0) -} - -func (s *KeywordContext) ROLLBACK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROLLBACK_, 0) -} - -func (s *KeywordContext) ROW_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROW_, 0) -} - -func (s *KeywordContext) ROWS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROWS_, 0) -} - -func (s *KeywordContext) SAVEPOINT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserSAVEPOINT_, 0) -} - -func (s *KeywordContext) SELECT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserSELECT_, 0) -} - -func (s *KeywordContext) SET_() antlr.TerminalNode { - return s.GetToken(SQLiteParserSET_, 0) -} - -func (s *KeywordContext) TABLE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTABLE_, 0) -} - -func (s *KeywordContext) TEMP_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTEMP_, 0) -} - -func (s *KeywordContext) TEMPORARY_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTEMPORARY_, 0) -} - -func (s *KeywordContext) THEN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTHEN_, 0) -} - -func (s *KeywordContext) TO_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTO_, 0) -} - -func (s *KeywordContext) TRANSACTION_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTRANSACTION_, 0) -} - -func (s *KeywordContext) TRIGGER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTRIGGER_, 0) -} - -func (s *KeywordContext) UNION_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUNION_, 0) -} - -func (s *KeywordContext) 
UNIQUE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUNIQUE_, 0) -} - -func (s *KeywordContext) UPDATE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUPDATE_, 0) -} - -func (s *KeywordContext) USING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUSING_, 0) -} - -func (s *KeywordContext) VACUUM_() antlr.TerminalNode { - return s.GetToken(SQLiteParserVACUUM_, 0) -} - -func (s *KeywordContext) VALUES_() antlr.TerminalNode { - return s.GetToken(SQLiteParserVALUES_, 0) -} - -func (s *KeywordContext) VIEW_() antlr.TerminalNode { - return s.GetToken(SQLiteParserVIEW_, 0) -} - -func (s *KeywordContext) VIRTUAL_() antlr.TerminalNode { - return s.GetToken(SQLiteParserVIRTUAL_, 0) -} - -func (s *KeywordContext) WHEN_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWHEN_, 0) -} - -func (s *KeywordContext) WHERE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWHERE_, 0) -} - -func (s *KeywordContext) WITH_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWITH_, 0) -} - -func (s *KeywordContext) WITHOUT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserWITHOUT_, 0) -} - -func (s *KeywordContext) FIRST_VALUE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFIRST_VALUE_, 0) -} - -func (s *KeywordContext) OVER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserOVER_, 0) -} - -func (s *KeywordContext) PARTITION_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPARTITION_, 0) -} - -func (s *KeywordContext) RANGE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRANGE_, 0) -} - -func (s *KeywordContext) PRECEDING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPRECEDING_, 0) -} - -func (s *KeywordContext) UNBOUNDED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserUNBOUNDED_, 0) -} - -func (s *KeywordContext) CURRENT_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCURRENT_, 0) -} - -func (s *KeywordContext) FOLLOWING_() antlr.TerminalNode { - return s.GetToken(SQLiteParserFOLLOWING_, 0) -} - -func (s *KeywordContext) CUME_DIST_() antlr.TerminalNode { - return s.GetToken(SQLiteParserCUME_DIST_, 0) -} - -func (s *KeywordContext) DENSE_RANK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserDENSE_RANK_, 0) -} - -func (s *KeywordContext) LAG_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLAG_, 0) -} - -func (s *KeywordContext) LAST_VALUE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLAST_VALUE_, 0) -} - -func (s *KeywordContext) LEAD_() antlr.TerminalNode { - return s.GetToken(SQLiteParserLEAD_, 0) -} - -func (s *KeywordContext) NTH_VALUE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNTH_VALUE_, 0) -} - -func (s *KeywordContext) NTILE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserNTILE_, 0) -} - -func (s *KeywordContext) PERCENT_RANK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserPERCENT_RANK_, 0) -} - -func (s *KeywordContext) RANK_() antlr.TerminalNode { - return s.GetToken(SQLiteParserRANK_, 0) -} - -func (s *KeywordContext) ROW_NUMBER_() antlr.TerminalNode { - return s.GetToken(SQLiteParserROW_NUMBER_, 0) -} - -func (s *KeywordContext) GENERATED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserGENERATED_, 0) -} - -func (s *KeywordContext) ALWAYS_() antlr.TerminalNode { - return s.GetToken(SQLiteParserALWAYS_, 0) -} - -func (s *KeywordContext) STORED_() antlr.TerminalNode { - return s.GetToken(SQLiteParserSTORED_, 0) -} - -func (s *KeywordContext) TRUE_() antlr.TerminalNode { - return s.GetToken(SQLiteParserTRUE_, 0) -} - -func (s *KeywordContext) 
FALSE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserFALSE_, 0)
-}
-
-func (s *KeywordContext) WINDOW_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserWINDOW_, 0)
-}
-
-func (s *KeywordContext) NULLS_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserNULLS_, 0)
-}
-
-func (s *KeywordContext) FIRST_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserFIRST_, 0)
-}
-
-func (s *KeywordContext) LAST_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserLAST_, 0)
-}
-
-func (s *KeywordContext) FILTER_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserFILTER_, 0)
-}
-
-func (s *KeywordContext) GROUPS_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserGROUPS_, 0)
-}
-
-func (s *KeywordContext) EXCLUDE_() antlr.TerminalNode {
-	return s.GetToken(SQLiteParserEXCLUDE_, 0)
-}
-
-func (s *KeywordContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *KeywordContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *KeywordContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterKeyword(s)
-	}
-}
-
-func (s *KeywordContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitKeyword(s)
-	}
-}
-
-func (p *SQLiteParser) Keyword() (localctx IKeywordContext) {
-	this := p
-	_ = this
-
-	localctx = NewKeywordContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 176, SQLiteParserRULE_keyword)
-	var _la int
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(1998)
-		_la = p.GetTokenStream().LA(1)
-
-		if !(((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&-33554432) != 0) || ((int64((_la-64)) & ^0x3f) == 0 && ((int64(1)<<(_la-64))&-1152921504606846977) != 0) || ((int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&9007199254740991) != 0)) {
-			p.GetErrorHandler().RecoverInline(p)
-		} else {
-			p.GetErrorHandler().ReportMatch(p)
-			p.Consume()
-		}
-	}
-
-	return localctx
-}
-
-// INameContext is an interface to support dynamic dispatch.
-type INameContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	Any_name() IAny_nameContext
-
-	// IsNameContext differentiates from other interfaces.
-	IsNameContext()
-}
-
-type NameContext struct {
-	*antlr.BaseParserRuleContext
-	parser antlr.Parser
-}
-
-func NewEmptyNameContext() *NameContext {
-	var p = new(NameContext)
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
-	p.RuleIndex = SQLiteParserRULE_name
-	return p
-}
-
-func (*NameContext) IsNameContext() {}
-
-func NewNameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *NameContext {
-	var p = new(NameContext)
-
-	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
-
-	p.parser = parser
-	p.RuleIndex = SQLiteParserRULE_name
-
-	return p
-}
-
-func (s *NameContext) GetParser() antlr.Parser { return s.parser }
-
-func (s *NameContext) Any_name() IAny_nameContext {
-	var t antlr.RuleContext
-	for _, ctx := range s.GetChildren() {
-		if _, ok := ctx.(IAny_nameContext); ok {
-			t = ctx.(antlr.RuleContext)
-			break
-		}
-	}
-
-	if t == nil {
-		return nil
-	}
-
-	return t.(IAny_nameContext)
-}
-
-func (s *NameContext) GetRuleContext() antlr.RuleContext {
-	return s
-}
-
-func (s *NameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
-	return antlr.TreesStringTree(s, ruleNames, recog)
-}
-
-func (s *NameContext) EnterRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.EnterName(s)
-	}
-}
-
-func (s *NameContext) ExitRule(listener antlr.ParseTreeListener) {
-	if listenerT, ok := listener.(SQLiteParserListener); ok {
-		listenerT.ExitName(s)
-	}
-}
-
-func (p *SQLiteParser) Name() (localctx INameContext) {
-	this := p
-	_ = this
-
-	localctx = NewNameContext(p, p.GetParserRuleContext(), p.GetState())
-	p.EnterRule(localctx, 178, SQLiteParserRULE_name)
-
-	defer func() {
-		p.ExitRule()
-	}()
-
-	defer func() {
-		if err := recover(); err != nil {
-			if v, ok := err.(antlr.RecognitionException); ok {
-				localctx.SetException(v)
-				p.GetErrorHandler().ReportError(p, v)
-				p.GetErrorHandler().Recover(p, v)
-			} else {
-				panic(err)
-			}
-		}
-	}()
-
-	p.EnterOuterAlt(localctx, 1)
-	{
-		p.SetState(2000)
-		p.Any_name()
-	}
-
-	return localctx
-}
-
-// IFunction_nameContext is an interface to support dynamic dispatch.
-type IFunction_nameContext interface {
-	antlr.ParserRuleContext
-
-	// GetParser returns the parser.
-	GetParser() antlr.Parser
-
-	// Getter signatures
-	Any_name() IAny_nameContext
-
-	// IsFunction_nameContext differentiates from other interfaces.
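The generated `Keyword()` rule above accepts any one of the grammar's keyword tokens without a switch statement: ANTLR bakes the keyword set into three signed 64-bit masks, one per 64-token window, and tests the lookahead token with a shift-and-AND. A literal like `-33554432` is just such a mask printed as a signed decimal (it is the 64-bit word with bits 25 through 63 set). A minimal, self-contained sketch of the same bitset-membership technique follows; the token values are hypothetical, not the real `SQLiteParser` constants:

```go
package main

import "fmt"

// tokenSet is a fixed-size bitset covering token types 0..191, mirroring the
// three 64-bit mask words the generated Keyword() rule tests against.
type tokenSet [3]uint64

// add marks a token type as a member of the set.
func (s *tokenSet) add(tok int) {
	s[tok>>6] |= 1 << (uint(tok) & 0x3f)
}

// contains reports whether tok is in the set. This is the same shape as the
// generated check: pick the 64-bit word for the token's window, shift a 1
// into the token's bit position, and AND with the mask.
func (s *tokenSet) contains(tok int) bool {
	if tok < 0 || tok >= 64*len(s) {
		return false
	}
	return s[tok>>6]&(1<<(uint(tok)&0x3f)) != 0
}

func main() {
	// Hypothetical token values, not the real SQLiteParser constants.
	const (
		tokSELECT = 130
		tokWHERE  = 145
		tokIdent  = 190
	)
	var keywords tokenSet
	keywords.add(tokSELECT)
	keywords.add(tokWHERE)

	fmt.Println(keywords.contains(tokSELECT)) // true
	fmt.Println(keywords.contains(tokIdent))  // false
}
```

Packing membership into machine words makes each check a couple of integer operations rather than a long chain of comparisons, which is why the generated condition reads as arithmetic instead of a list of token names.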
- IsFunction_nameContext() -} - -type Function_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyFunction_nameContext() *Function_nameContext { - var p = new(Function_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_function_name - return p -} - -func (*Function_nameContext) IsFunction_nameContext() {} - -func NewFunction_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Function_nameContext { - var p = new(Function_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_function_name - - return p -} - -func (s *Function_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Function_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Function_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Function_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Function_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterFunction_name(s) - } -} - -func (s *Function_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitFunction_name(s) - } -} - -func (p *SQLiteParser) Function_name() (localctx IFunction_nameContext) { - this := p - _ = this - - localctx = NewFunction_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 180, SQLiteParserRULE_function_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2002) - p.Any_name() - } - - return localctx -} - -// ISchema_nameContext is an interface to support dynamic dispatch. -type ISchema_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsSchema_nameContext differentiates from other interfaces. 
- IsSchema_nameContext() -} - -type Schema_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptySchema_nameContext() *Schema_nameContext { - var p = new(Schema_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_schema_name - return p -} - -func (*Schema_nameContext) IsSchema_nameContext() {} - -func NewSchema_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Schema_nameContext { - var p = new(Schema_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_schema_name - - return p -} - -func (s *Schema_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Schema_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Schema_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Schema_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Schema_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterSchema_name(s) - } -} - -func (s *Schema_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitSchema_name(s) - } -} - -func (p *SQLiteParser) Schema_name() (localctx ISchema_nameContext) { - this := p - _ = this - - localctx = NewSchema_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 182, SQLiteParserRULE_schema_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2004) - p.Any_name() - } - - return localctx -} - -// ITable_nameContext is an interface to support dynamic dispatch. -type ITable_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsTable_nameContext differentiates from other interfaces. 
- IsTable_nameContext() -} - -type Table_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyTable_nameContext() *Table_nameContext { - var p = new(Table_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_table_name - return p -} - -func (*Table_nameContext) IsTable_nameContext() {} - -func NewTable_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_nameContext { - var p = new(Table_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_table_name - - return p -} - -func (s *Table_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Table_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Table_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Table_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Table_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterTable_name(s) - } -} - -func (s *Table_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitTable_name(s) - } -} - -func (p *SQLiteParser) Table_name() (localctx ITable_nameContext) { - this := p - _ = this - - localctx = NewTable_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 184, SQLiteParserRULE_table_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2006) - p.Any_name() - } - - return localctx -} - -// ITable_or_index_nameContext is an interface to support dynamic dispatch. -type ITable_or_index_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsTable_or_index_nameContext differentiates from other interfaces. 
- IsTable_or_index_nameContext() -} - -type Table_or_index_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyTable_or_index_nameContext() *Table_or_index_nameContext { - var p = new(Table_or_index_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_table_or_index_name - return p -} - -func (*Table_or_index_nameContext) IsTable_or_index_nameContext() {} - -func NewTable_or_index_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_or_index_nameContext { - var p = new(Table_or_index_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_table_or_index_name - - return p -} - -func (s *Table_or_index_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Table_or_index_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Table_or_index_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Table_or_index_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Table_or_index_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterTable_or_index_name(s) - } -} - -func (s *Table_or_index_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitTable_or_index_name(s) - } -} - -func (p *SQLiteParser) Table_or_index_name() (localctx ITable_or_index_nameContext) { - this := p - _ = this - - localctx = NewTable_or_index_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 186, SQLiteParserRULE_table_or_index_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2008) - p.Any_name() - } - - return localctx -} - -// IColumn_nameContext is an interface to support dynamic dispatch. -type IColumn_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsColumn_nameContext differentiates from other interfaces. 
- IsColumn_nameContext() -} - -type Column_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyColumn_nameContext() *Column_nameContext { - var p = new(Column_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_column_name - return p -} - -func (*Column_nameContext) IsColumn_nameContext() {} - -func NewColumn_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Column_nameContext { - var p = new(Column_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_column_name - - return p -} - -func (s *Column_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Column_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Column_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Column_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Column_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterColumn_name(s) - } -} - -func (s *Column_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitColumn_name(s) - } -} - -func (p *SQLiteParser) Column_name() (localctx IColumn_nameContext) { - this := p - _ = this - - localctx = NewColumn_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 188, SQLiteParserRULE_column_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2010) - p.Any_name() - } - - return localctx -} - -// ICollation_nameContext is an interface to support dynamic dispatch. -type ICollation_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsCollation_nameContext differentiates from other interfaces. 
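Each of these thin name rules exposes the same generated `Any_name()` getter: it scans `GetChildren()` in order and returns the first child that implements `IAny_nameContext`, or nil when no such child exists, so callers must nil-check. A standalone sketch of that find-first-child-by-type pattern, using toy node types rather than the antlr runtime:

```go
package main

import "fmt"

// node models a parse-tree child list: a heterogeneous slice of nodes.
type node interface{ label() string }

// token stands in for a terminal child.
type token struct{ s string }

func (t token) label() string { return t.s }

// ruleNode stands in for a child rule context (the Any_name child here).
type ruleNode struct{ name string }

func (r ruleNode) label() string { return r.name }

// firstRuleNode mirrors the generated Any_name() getter: scan the children
// in order and return the first one of the wanted type, or nil if none is.
func firstRuleNode(children []node) *ruleNode {
	for _, c := range children {
		if r, ok := c.(ruleNode); ok {
			return &r
		}
	}
	return nil
}

func main() {
	kids := []node{token{"("}, ruleNode{"any_name"}, token{")"}}
	if r := firstRuleNode(kids); r != nil {
		fmt.Println("found:", r.name)
	}
}
```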
- IsCollation_nameContext() -} - -type Collation_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyCollation_nameContext() *Collation_nameContext { - var p = new(Collation_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_collation_name - return p -} - -func (*Collation_nameContext) IsCollation_nameContext() {} - -func NewCollation_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Collation_nameContext { - var p = new(Collation_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_collation_name - - return p -} - -func (s *Collation_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Collation_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Collation_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Collation_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Collation_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterCollation_name(s) - } -} - -func (s *Collation_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitCollation_name(s) - } -} - -func (p *SQLiteParser) Collation_name() (localctx ICollation_nameContext) { - this := p - _ = this - - localctx = NewCollation_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 190, SQLiteParserRULE_collation_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2012) - p.Any_name() - } - - return localctx -} - -// IForeign_tableContext is an interface to support dynamic dispatch. -type IForeign_tableContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsForeign_tableContext differentiates from other interfaces. 
- IsForeign_tableContext() -} - -type Foreign_tableContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyForeign_tableContext() *Foreign_tableContext { - var p = new(Foreign_tableContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_foreign_table - return p -} - -func (*Foreign_tableContext) IsForeign_tableContext() {} - -func NewForeign_tableContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Foreign_tableContext { - var p = new(Foreign_tableContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_foreign_table - - return p -} - -func (s *Foreign_tableContext) GetParser() antlr.Parser { return s.parser } - -func (s *Foreign_tableContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Foreign_tableContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Foreign_tableContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Foreign_tableContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterForeign_table(s) - } -} - -func (s *Foreign_tableContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitForeign_table(s) - } -} - -func (p *SQLiteParser) Foreign_table() (localctx IForeign_tableContext) { - this := p - _ = this - - localctx = NewForeign_tableContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 192, SQLiteParserRULE_foreign_table) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2014) - p.Any_name() - } - - return localctx -} - -// IIndex_nameContext is an interface to support dynamic dispatch. -type IIndex_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsIndex_nameContext differentiates from other interfaces. 
- IsIndex_nameContext() -} - -type Index_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyIndex_nameContext() *Index_nameContext { - var p = new(Index_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_index_name - return p -} - -func (*Index_nameContext) IsIndex_nameContext() {} - -func NewIndex_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Index_nameContext { - var p = new(Index_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_index_name - - return p -} - -func (s *Index_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Index_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Index_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Index_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Index_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterIndex_name(s) - } -} - -func (s *Index_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitIndex_name(s) - } -} - -func (p *SQLiteParser) Index_name() (localctx IIndex_nameContext) { - this := p - _ = this - - localctx = NewIndex_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 194, SQLiteParserRULE_index_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2016) - p.Any_name() - } - - return localctx -} - -// ITrigger_nameContext is an interface to support dynamic dispatch. -type ITrigger_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsTrigger_nameContext differentiates from other interfaces. 
- IsTrigger_nameContext() -} - -type Trigger_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyTrigger_nameContext() *Trigger_nameContext { - var p = new(Trigger_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_trigger_name - return p -} - -func (*Trigger_nameContext) IsTrigger_nameContext() {} - -func NewTrigger_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Trigger_nameContext { - var p = new(Trigger_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_trigger_name - - return p -} - -func (s *Trigger_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Trigger_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Trigger_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Trigger_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Trigger_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterTrigger_name(s) - } -} - -func (s *Trigger_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitTrigger_name(s) - } -} - -func (p *SQLiteParser) Trigger_name() (localctx ITrigger_nameContext) { - this := p - _ = this - - localctx = NewTrigger_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 196, SQLiteParserRULE_trigger_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2018) - p.Any_name() - } - - return localctx -} - -// IView_nameContext is an interface to support dynamic dispatch. -type IView_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsView_nameContext differentiates from other interfaces. 
- IsView_nameContext() -} - -type View_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyView_nameContext() *View_nameContext { - var p = new(View_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_view_name - return p -} - -func (*View_nameContext) IsView_nameContext() {} - -func NewView_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *View_nameContext { - var p = new(View_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_view_name - - return p -} - -func (s *View_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *View_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *View_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *View_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *View_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterView_name(s) - } -} - -func (s *View_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitView_name(s) - } -} - -func (p *SQLiteParser) View_name() (localctx IView_nameContext) { - this := p - _ = this - - localctx = NewView_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 198, SQLiteParserRULE_view_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2020) - p.Any_name() - } - - return localctx -} - -// IModule_nameContext is an interface to support dynamic dispatch. -type IModule_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsModule_nameContext differentiates from other interfaces. 
- IsModule_nameContext() -} - -type Module_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyModule_nameContext() *Module_nameContext { - var p = new(Module_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_module_name - return p -} - -func (*Module_nameContext) IsModule_nameContext() {} - -func NewModule_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Module_nameContext { - var p = new(Module_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_module_name - - return p -} - -func (s *Module_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Module_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Module_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Module_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Module_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterModule_name(s) - } -} - -func (s *Module_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitModule_name(s) - } -} - -func (p *SQLiteParser) Module_name() (localctx IModule_nameContext) { - this := p - _ = this - - localctx = NewModule_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 200, SQLiteParserRULE_module_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2022) - p.Any_name() - } - - return localctx -} - -// IPragma_nameContext is an interface to support dynamic dispatch. -type IPragma_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsPragma_nameContext differentiates from other interfaces. 
- IsPragma_nameContext() -} - -type Pragma_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyPragma_nameContext() *Pragma_nameContext { - var p = new(Pragma_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_pragma_name - return p -} - -func (*Pragma_nameContext) IsPragma_nameContext() {} - -func NewPragma_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Pragma_nameContext { - var p = new(Pragma_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_pragma_name - - return p -} - -func (s *Pragma_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Pragma_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Pragma_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Pragma_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Pragma_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterPragma_name(s) - } -} - -func (s *Pragma_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitPragma_name(s) - } -} - -func (p *SQLiteParser) Pragma_name() (localctx IPragma_nameContext) { - this := p - _ = this - - localctx = NewPragma_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 202, SQLiteParserRULE_pragma_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2024) - p.Any_name() - } - - return localctx -} - -// ISavepoint_nameContext is an interface to support dynamic dispatch. -type ISavepoint_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsSavepoint_nameContext differentiates from other interfaces. 
- IsSavepoint_nameContext() -} - -type Savepoint_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptySavepoint_nameContext() *Savepoint_nameContext { - var p = new(Savepoint_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_savepoint_name - return p -} - -func (*Savepoint_nameContext) IsSavepoint_nameContext() {} - -func NewSavepoint_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Savepoint_nameContext { - var p = new(Savepoint_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_savepoint_name - - return p -} - -func (s *Savepoint_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Savepoint_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Savepoint_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Savepoint_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Savepoint_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterSavepoint_name(s) - } -} - -func (s *Savepoint_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitSavepoint_name(s) - } -} - -func (p *SQLiteParser) Savepoint_name() (localctx ISavepoint_nameContext) { - this := p - _ = this - - localctx = NewSavepoint_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 204, SQLiteParserRULE_savepoint_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2026) - p.Any_name() - } - - return localctx -} - -// ITable_aliasContext is an interface to support dynamic dispatch. -type ITable_aliasContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsTable_aliasContext differentiates from other interfaces. 
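The wrapper rules in this stretch (Table_or_index_name, Column_name, Collation_name, Index_name, Savepoint_name, and the rest) all have identical bodies; only the context's static type differs. That distinct type is the point: it lets a `SQLiteParserListener` implementation react to a table name differently from, say, a savepoint name even though both parse as `any_name`. A toy model of that wrapper-plus-listener dispatch, independent of the antlr API:

```go
package main

import "fmt"

// anyName models the single shared any_name rule that every name-like rule
// in the generated parser delegates to.
type anyName struct{ text string }

// Each wrapper mirrors one thin generated rule: same payload, distinct
// static type so listeners can dispatch on the role the name plays.
type tableName struct{ anyName }
type columnName struct{ anyName }

// listener mirrors the generated Enter* callbacks: one method per rule even
// though the underlying content is identical.
type listener interface {
	enterTableName(tableName)
	enterColumnName(columnName)
}

type printer struct{}

func (printer) enterTableName(t tableName)   { fmt.Println("table:", t.text) }
func (printer) enterColumnName(c columnName) { fmt.Println("column:", c.text) }

// walk routes each node to the role-specific callback, the way the generated
// EnterRule methods route to EnterTable_name, EnterColumn_name, and so on.
func walk(l listener, nodes []interface{}) {
	for _, n := range nodes {
		switch v := n.(type) {
		case tableName:
			l.enterTableName(v)
		case columnName:
			l.enterColumnName(v)
		}
	}
}

func main() {
	walk(printer{}, []interface{}{
		tableName{anyName{"users"}},
		columnName{anyName{"id"}},
	})
}
```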
- IsTable_aliasContext() -} - -type Table_aliasContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyTable_aliasContext() *Table_aliasContext { - var p = new(Table_aliasContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_table_alias - return p -} - -func (*Table_aliasContext) IsTable_aliasContext() {} - -func NewTable_aliasContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_aliasContext { - var p = new(Table_aliasContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_table_alias - - return p -} - -func (s *Table_aliasContext) GetParser() antlr.Parser { return s.parser } - -func (s *Table_aliasContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Table_aliasContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Table_aliasContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Table_aliasContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterTable_alias(s) - } -} - -func (s *Table_aliasContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitTable_alias(s) - } -} - -func (p *SQLiteParser) Table_alias() (localctx ITable_aliasContext) { - this := p - _ = this - - localctx = NewTable_aliasContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 206, SQLiteParserRULE_table_alias) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2028) - p.Any_name() - } - - return localctx -} - -// ITransaction_nameContext is an interface to support dynamic dispatch. -type ITransaction_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsTransaction_nameContext differentiates from other interfaces. 
- IsTransaction_nameContext() -} - -type Transaction_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyTransaction_nameContext() *Transaction_nameContext { - var p = new(Transaction_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_transaction_name - return p -} - -func (*Transaction_nameContext) IsTransaction_nameContext() {} - -func NewTransaction_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Transaction_nameContext { - var p = new(Transaction_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_transaction_name - - return p -} - -func (s *Transaction_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Transaction_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Transaction_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Transaction_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Transaction_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterTransaction_name(s) - } -} - -func (s *Transaction_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitTransaction_name(s) - } -} - -func (p *SQLiteParser) Transaction_name() (localctx ITransaction_nameContext) { - this := p - _ = this - - localctx = NewTransaction_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 208, SQLiteParserRULE_transaction_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2030) - p.Any_name() - } - - return localctx -} - -// IWindow_nameContext is an interface to support dynamic dispatch. -type IWindow_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsWindow_nameContext differentiates from other interfaces. 
- IsWindow_nameContext() -} - -type Window_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyWindow_nameContext() *Window_nameContext { - var p = new(Window_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_window_name - return p -} - -func (*Window_nameContext) IsWindow_nameContext() {} - -func NewWindow_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Window_nameContext { - var p = new(Window_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_window_name - - return p -} - -func (s *Window_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Window_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Window_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Window_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Window_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterWindow_name(s) - } -} - -func (s *Window_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitWindow_name(s) - } -} - -func (p *SQLiteParser) Window_name() (localctx IWindow_nameContext) { - this := p - _ = this - - localctx = NewWindow_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 210, SQLiteParserRULE_window_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2032) - p.Any_name() - } - - return localctx -} - -// IAliasContext is an interface to support dynamic dispatch. -type IAliasContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsAliasContext differentiates from other interfaces. 
- IsAliasContext() -} - -type AliasContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyAliasContext() *AliasContext { - var p = new(AliasContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_alias - return p -} - -func (*AliasContext) IsAliasContext() {} - -func NewAliasContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *AliasContext { - var p = new(AliasContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_alias - - return p -} - -func (s *AliasContext) GetParser() antlr.Parser { return s.parser } - -func (s *AliasContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *AliasContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *AliasContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *AliasContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterAlias(s) - } -} - -func (s *AliasContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitAlias(s) - } -} - -func (p *SQLiteParser) Alias() (localctx IAliasContext) { - this := p - _ = this - - localctx = NewAliasContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 212, SQLiteParserRULE_alias) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2034) - p.Any_name() - } - - return localctx -} - -// IFilenameContext is an interface to support dynamic dispatch. -type IFilenameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsFilenameContext differentiates from other interfaces. 
- IsFilenameContext() -} - -type FilenameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyFilenameContext() *FilenameContext { - var p = new(FilenameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_filename - return p -} - -func (*FilenameContext) IsFilenameContext() {} - -func NewFilenameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *FilenameContext { - var p = new(FilenameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_filename - - return p -} - -func (s *FilenameContext) GetParser() antlr.Parser { return s.parser } - -func (s *FilenameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *FilenameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *FilenameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *FilenameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterFilename(s) - } -} - -func (s *FilenameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitFilename(s) - } -} - -func (p *SQLiteParser) Filename() (localctx IFilenameContext) { - this := p - _ = this - - localctx = NewFilenameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 214, SQLiteParserRULE_filename) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2036) - p.Any_name() - } - - return localctx -} - -// IBase_window_nameContext is an interface to support dynamic dispatch. -type IBase_window_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsBase_window_nameContext differentiates from other interfaces. 
- IsBase_window_nameContext() -} - -type Base_window_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyBase_window_nameContext() *Base_window_nameContext { - var p = new(Base_window_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_base_window_name - return p -} - -func (*Base_window_nameContext) IsBase_window_nameContext() {} - -func NewBase_window_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Base_window_nameContext { - var p = new(Base_window_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_base_window_name - - return p -} - -func (s *Base_window_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Base_window_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Base_window_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Base_window_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Base_window_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterBase_window_name(s) - } -} - -func (s *Base_window_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitBase_window_name(s) - } -} - -func (p *SQLiteParser) Base_window_name() (localctx IBase_window_nameContext) { - this := p - _ = this - - localctx = NewBase_window_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 216, SQLiteParserRULE_base_window_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2038) - p.Any_name() - } - - return localctx -} - -// ISimple_funcContext is an interface to support dynamic dispatch. -type ISimple_funcContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsSimple_funcContext differentiates from other interfaces. 
- IsSimple_funcContext() -} - -type Simple_funcContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptySimple_funcContext() *Simple_funcContext { - var p = new(Simple_funcContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_simple_func - return p -} - -func (*Simple_funcContext) IsSimple_funcContext() {} - -func NewSimple_funcContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Simple_funcContext { - var p = new(Simple_funcContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_simple_func - - return p -} - -func (s *Simple_funcContext) GetParser() antlr.Parser { return s.parser } - -func (s *Simple_funcContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Simple_funcContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Simple_funcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Simple_funcContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterSimple_func(s) - } -} - -func (s *Simple_funcContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitSimple_func(s) - } -} - -func (p *SQLiteParser) Simple_func() (localctx ISimple_funcContext) { - this := p - _ = this - - localctx = NewSimple_funcContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 218, SQLiteParserRULE_simple_func) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2040) - p.Any_name() - } - - return localctx -} - -// IAggregate_funcContext is an interface to support dynamic dispatch. -type IAggregate_funcContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsAggregate_funcContext differentiates from other interfaces. 
- IsAggregate_funcContext() -} - -type Aggregate_funcContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyAggregate_funcContext() *Aggregate_funcContext { - var p = new(Aggregate_funcContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_aggregate_func - return p -} - -func (*Aggregate_funcContext) IsAggregate_funcContext() {} - -func NewAggregate_funcContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Aggregate_funcContext { - var p = new(Aggregate_funcContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_aggregate_func - - return p -} - -func (s *Aggregate_funcContext) GetParser() antlr.Parser { return s.parser } - -func (s *Aggregate_funcContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Aggregate_funcContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Aggregate_funcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Aggregate_funcContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterAggregate_func(s) - } -} - -func (s *Aggregate_funcContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitAggregate_func(s) - } -} - -func (p *SQLiteParser) Aggregate_func() (localctx IAggregate_funcContext) { - this := p - _ = this - - localctx = NewAggregate_funcContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 220, SQLiteParserRULE_aggregate_func) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2042) - p.Any_name() - } - - return localctx -} - -// ITable_function_nameContext is an interface to support dynamic dispatch. -type ITable_function_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - Any_name() IAny_nameContext - - // IsTable_function_nameContext differentiates from other interfaces. 
- IsTable_function_nameContext() -} - -type Table_function_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyTable_function_nameContext() *Table_function_nameContext { - var p = new(Table_function_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_table_function_name - return p -} - -func (*Table_function_nameContext) IsTable_function_nameContext() {} - -func NewTable_function_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_function_nameContext { - var p = new(Table_function_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_table_function_name - - return p -} - -func (s *Table_function_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Table_function_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Table_function_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Table_function_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Table_function_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterTable_function_name(s) - } -} - -func (s *Table_function_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitTable_function_name(s) - } -} - -func (p *SQLiteParser) Table_function_name() (localctx ITable_function_nameContext) { - this := p - _ = this - - localctx = NewTable_function_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 222, SQLiteParserRULE_table_function_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2044) - p.Any_name() - } - - return localctx -} - -// IAny_nameContext is an interface to support dynamic dispatch. -type IAny_nameContext interface { - antlr.ParserRuleContext - - // GetParser returns the parser. - GetParser() antlr.Parser - - // Getter signatures - IDENTIFIER() antlr.TerminalNode - Keyword() IKeywordContext - STRING_LITERAL() antlr.TerminalNode - OPEN_PAR() antlr.TerminalNode - Any_name() IAny_nameContext - CLOSE_PAR() antlr.TerminalNode - - // IsAny_nameContext differentiates from other interfaces. 
- IsAny_nameContext() -} - -type Any_nameContext struct { - *antlr.BaseParserRuleContext - parser antlr.Parser -} - -func NewEmptyAny_nameContext() *Any_nameContext { - var p = new(Any_nameContext) - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) - p.RuleIndex = SQLiteParserRULE_any_name - return p -} - -func (*Any_nameContext) IsAny_nameContext() {} - -func NewAny_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Any_nameContext { - var p = new(Any_nameContext) - - p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) - - p.parser = parser - p.RuleIndex = SQLiteParserRULE_any_name - - return p -} - -func (s *Any_nameContext) GetParser() antlr.Parser { return s.parser } - -func (s *Any_nameContext) IDENTIFIER() antlr.TerminalNode { - return s.GetToken(SQLiteParserIDENTIFIER, 0) -} - -func (s *Any_nameContext) Keyword() IKeywordContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IKeywordContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IKeywordContext) -} - -func (s *Any_nameContext) STRING_LITERAL() antlr.TerminalNode { - return s.GetToken(SQLiteParserSTRING_LITERAL, 0) -} - -func (s *Any_nameContext) OPEN_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserOPEN_PAR, 0) -} - -func (s *Any_nameContext) Any_name() IAny_nameContext { - var t antlr.RuleContext - for _, ctx := range s.GetChildren() { - if _, ok := ctx.(IAny_nameContext); ok { - t = ctx.(antlr.RuleContext) - break - } - } - - if t == nil { - return nil - } - - return t.(IAny_nameContext) -} - -func (s *Any_nameContext) CLOSE_PAR() antlr.TerminalNode { - return s.GetToken(SQLiteParserCLOSE_PAR, 0) -} - -func (s *Any_nameContext) GetRuleContext() antlr.RuleContext { - return s -} - -func (s *Any_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { - return antlr.TreesStringTree(s, ruleNames, recog) -} - -func (s *Any_nameContext) EnterRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.EnterAny_name(s) - } -} - -func (s *Any_nameContext) ExitRule(listener antlr.ParseTreeListener) { - if listenerT, ok := listener.(SQLiteParserListener); ok { - listenerT.ExitAny_name(s) - } -} - -func (p *SQLiteParser) Any_name() (localctx IAny_nameContext) { - this := p - _ = this - - localctx = NewAny_nameContext(p, p.GetParserRuleContext(), p.GetState()) - p.EnterRule(localctx, 224, SQLiteParserRULE_any_name) - - defer func() { - p.ExitRule() - }() - - defer func() { - if err := recover(); err != nil { - if v, ok := err.(antlr.RecognitionException); ok { - localctx.SetException(v) - p.GetErrorHandler().ReportError(p, v) - p.GetErrorHandler().Recover(p, v) - } else { - panic(err) - } - } - }() - - p.SetState(2053) - p.GetErrorHandler().Sync(p) - - switch p.GetTokenStream().LA(1) { - case SQLiteParserIDENTIFIER: - p.EnterOuterAlt(localctx, 1) - { - p.SetState(2046) - p.Match(SQLiteParserIDENTIFIER) - } - - case SQLiteParserABORT_, SQLiteParserACTION_, SQLiteParserADD_, SQLiteParserAFTER_, SQLiteParserALL_, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserAND_, SQLiteParserAS_, SQLiteParserASC_, SQLiteParserATTACH_, SQLiteParserAUTOINCREMENT_, SQLiteParserBEFORE_, SQLiteParserBEGIN_, SQLiteParserBETWEEN_, SQLiteParserBY_, SQLiteParserCASCADE_, SQLiteParserCASE_, SQLiteParserCAST_, SQLiteParserCHECK_, SQLiteParserCOLLATE_, SQLiteParserCOLUMN_, SQLiteParserCOMMIT_, 
SQLiteParserCONFLICT_, SQLiteParserCONSTRAINT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserCURRENT_DATE_, SQLiteParserCURRENT_TIME_, SQLiteParserCURRENT_TIMESTAMP_, SQLiteParserDATABASE_, SQLiteParserDEFAULT_, SQLiteParserDEFERRABLE_, SQLiteParserDEFERRED_, SQLiteParserDELETE_, SQLiteParserDESC_, SQLiteParserDETACH_, SQLiteParserDISTINCT_, SQLiteParserDROP_, SQLiteParserEACH_, SQLiteParserELSE_, SQLiteParserEND_, SQLiteParserESCAPE_, SQLiteParserEXCEPT_, SQLiteParserEXCLUSIVE_, SQLiteParserEXISTS_, SQLiteParserEXPLAIN_, SQLiteParserFAIL_, SQLiteParserFOR_, SQLiteParserFOREIGN_, SQLiteParserFROM_, SQLiteParserFULL_, SQLiteParserGLOB_, SQLiteParserGROUP_, SQLiteParserHAVING_, SQLiteParserIF_, SQLiteParserIGNORE_, SQLiteParserIMMEDIATE_, SQLiteParserIN_, SQLiteParserINDEX_, SQLiteParserINDEXED_, SQLiteParserINITIALLY_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINSTEAD_, SQLiteParserINTERSECT_, SQLiteParserINTO_, SQLiteParserIS_, SQLiteParserISNULL_, SQLiteParserJOIN_, SQLiteParserKEY_, SQLiteParserLEFT_, SQLiteParserLIKE_, SQLiteParserLIMIT_, SQLiteParserMATCH_, SQLiteParserNATURAL_, SQLiteParserNO_, SQLiteParserNOT_, SQLiteParserNOTNULL_, SQLiteParserNULL_, SQLiteParserOF_, SQLiteParserOFFSET_, SQLiteParserON_, SQLiteParserOR_, SQLiteParserORDER_, SQLiteParserOUTER_, SQLiteParserPLAN_, SQLiteParserPRAGMA_, SQLiteParserPRIMARY_, SQLiteParserQUERY_, SQLiteParserRAISE_, SQLiteParserRECURSIVE_, SQLiteParserREFERENCES_, SQLiteParserREGEXP_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserRENAME_, SQLiteParserREPLACE_, SQLiteParserRESTRICT_, SQLiteParserRIGHT_, SQLiteParserROLLBACK_, SQLiteParserROW_, SQLiteParserROWS_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserTABLE_, SQLiteParserTEMP_, SQLiteParserTEMPORARY_, SQLiteParserTHEN_, SQLiteParserTO_, SQLiteParserTRANSACTION_, SQLiteParserTRIGGER_, SQLiteParserUNION_, SQLiteParserUNIQUE_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserVIEW_, SQLiteParserVIRTUAL_, SQLiteParserWHEN_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWITHOUT_, SQLiteParserFIRST_VALUE_, SQLiteParserOVER_, SQLiteParserPARTITION_, SQLiteParserRANGE_, SQLiteParserPRECEDING_, SQLiteParserUNBOUNDED_, SQLiteParserCURRENT_, SQLiteParserFOLLOWING_, SQLiteParserCUME_DIST_, SQLiteParserDENSE_RANK_, SQLiteParserLAG_, SQLiteParserLAST_VALUE_, SQLiteParserLEAD_, SQLiteParserNTH_VALUE_, SQLiteParserNTILE_, SQLiteParserPERCENT_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_, SQLiteParserGENERATED_, SQLiteParserALWAYS_, SQLiteParserSTORED_, SQLiteParserTRUE_, SQLiteParserFALSE_, SQLiteParserWINDOW_, SQLiteParserNULLS_, SQLiteParserFIRST_, SQLiteParserLAST_, SQLiteParserFILTER_, SQLiteParserGROUPS_, SQLiteParserEXCLUDE_: - p.EnterOuterAlt(localctx, 2) - { - p.SetState(2047) - p.Keyword() - } - - case SQLiteParserSTRING_LITERAL: - p.EnterOuterAlt(localctx, 3) - { - p.SetState(2048) - p.Match(SQLiteParserSTRING_LITERAL) - } - - case SQLiteParserOPEN_PAR: - p.EnterOuterAlt(localctx, 4) - { - p.SetState(2049) - p.Match(SQLiteParserOPEN_PAR) - } - { - p.SetState(2050) - p.Any_name() - } - { - p.SetState(2051) - p.Match(SQLiteParserCLOSE_PAR) - } - - default: - panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) - } - - return localctx -} - -func (p *SQLiteParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool { - switch ruleIndex { - case 32: - var t *ExprContext = nil - if localctx != nil { - t = localctx.(*ExprContext) - } - return p.Expr_Sempred(t, 
predIndex) - - default: - panic("No predicate with index: " + fmt.Sprint(ruleIndex)) - } -} - -func (p *SQLiteParser) Expr_Sempred(localctx antlr.RuleContext, predIndex int) bool { - this := p - _ = this - - switch predIndex { - case 0: - return p.Precpred(p.GetParserRuleContext(), 20) - - case 1: - return p.Precpred(p.GetParserRuleContext(), 19) - - case 2: - return p.Precpred(p.GetParserRuleContext(), 18) - - case 3: - return p.Precpred(p.GetParserRuleContext(), 17) - - case 4: - return p.Precpred(p.GetParserRuleContext(), 16) - - case 5: - return p.Precpred(p.GetParserRuleContext(), 15) - - case 6: - return p.Precpred(p.GetParserRuleContext(), 14) - - case 7: - return p.Precpred(p.GetParserRuleContext(), 13) - - case 8: - return p.Precpred(p.GetParserRuleContext(), 6) - - case 9: - return p.Precpred(p.GetParserRuleContext(), 5) - - case 10: - return p.Precpred(p.GetParserRuleContext(), 9) - - case 11: - return p.Precpred(p.GetParserRuleContext(), 8) - - case 12: - return p.Precpred(p.GetParserRuleContext(), 7) - - case 13: - return p.Precpred(p.GetParserRuleContext(), 4) - - default: - panic("No predicate with index: " + fmt.Sprint(predIndex)) - } -} +// Code generated from SQLiteParser.g4 by ANTLR 4.12.0. DO NOT EDIT. + +package sqliteparser // SQLiteParser +import ( + "fmt" + "strconv" + "sync" + + "github.com/antlr/antlr4/runtime/Go/antlr/v4" +) + +// Suppress unused import errors +var _ = fmt.Printf +var _ = strconv.Itoa +var _ = sync.Once{} + +type SQLiteParser struct { + *antlr.BaseParser +} + +var sqliteparserParserStaticData struct { + once sync.Once + serializedATN []int32 + literalNames []string + symbolicNames []string + ruleNames []string + predictionContextCache *antlr.PredictionContextCache + atn *antlr.ATN + decisionToDFA []*antlr.DFA +} + +func sqliteparserParserInit() { + staticData := &sqliteparserParserStaticData + staticData.literalNames = []string{ + "", "';'", "'.'", "'('", "')'", "','", "'='", "'*'", "'+'", "'-'", "'~'", + "'||'", "'/'", "'%'", "'<<'", "'>>'", "'&'", "'|'", "'<'", "'<='", "'>'", + "'>='", "'=='", "'!='", "'<>'", "'ABORT'", "'ACTION'", "'ADD'", "'AFTER'", + "'ALL'", "'ALTER'", "'ANALYZE'", "'AND'", "'AS'", "'ASC'", "'ATTACH'", + "'AUTOINCREMENT'", "'BEFORE'", "'BEGIN'", "'BETWEEN'", "'BY'", "'CASCADE'", + "'CASE'", "'CAST'", "'CHECK'", "'COLLATE'", "'COLUMN'", "'COMMIT'", + "'CONFLICT'", "'CONSTRAINT'", "'CREATE'", "'CROSS'", "'CURRENT_DATE'", + "'CURRENT_TIME'", "'CURRENT_TIMESTAMP'", "'DATABASE'", "'DEFAULT'", + "'DEFERRABLE'", "'DEFERRED'", "'DELETE'", "'DESC'", "'DETACH'", "'DISTINCT'", + "'DROP'", "'EACH'", "'ELSE'", "'END'", "'ESCAPE'", "'EXCEPT'", "'EXCLUSIVE'", + "'EXISTS'", "'EXPLAIN'", "'FAIL'", "'FOR'", "'FOREIGN'", "'FROM'", "'FULL'", + "'GLOB'", "'GROUP'", "'HAVING'", "'IF'", "'IGNORE'", "'IMMEDIATE'", + "'IN'", "'INDEX'", "'INDEXED'", "'INITIALLY'", "'INNER'", "'INSERT'", + "'INSTEAD'", "'INTERSECT'", "'INTO'", "'IS'", "'ISNULL'", "'JOIN'", + "'KEY'", "'LEFT'", "'LIKE'", "'LIMIT'", "'MATCH'", "'NATURAL'", "'NO'", + "'NOT'", "'NOTNULL'", "'NULL'", "'OF'", "'OFFSET'", "'ON'", "'OR'", + "'ORDER'", "'OUTER'", "'PLAN'", "'PRAGMA'", "'PRIMARY'", "'QUERY'", + "'RAISE'", "'RECURSIVE'", "'REFERENCES'", "'REGEXP'", "'REINDEX'", "'RELEASE'", + "'RENAME'", "'REPLACE'", "'RESTRICT'", "'RETURNING'", "'RIGHT'", "'ROLLBACK'", + "'ROW'", "'ROWS'", "'SAVEPOINT'", "'SELECT'", "'SET'", "'TABLE'", "'TEMP'", + "'TEMPORARY'", "'THEN'", "'TO'", "'TRANSACTION'", "'TRIGGER'", "'UNION'", + "'UNIQUE'", "'UPDATE'", "'USING'", "'VACUUM'", "'VALUES'", "'VIEW'", + 
"'VIRTUAL'", "'WHEN'", "'WHERE'", "'WITH'", "'WITHOUT'", "'FIRST_VALUE'", + "'OVER'", "'PARTITION'", "'RANGE'", "'PRECEDING'", "'UNBOUNDED'", "'CURRENT'", + "'FOLLOWING'", "'CUME_DIST'", "'DENSE_RANK'", "'LAG'", "'LAST_VALUE'", + "'LEAD'", "'NTH_VALUE'", "'NTILE'", "'PERCENT_RANK'", "'RANK'", "'ROW_NUMBER'", + "'GENERATED'", "'ALWAYS'", "'STORED'", "'TRUE'", "'FALSE'", "'WINDOW'", + "'NULLS'", "'FIRST'", "'LAST'", "'FILTER'", "'GROUPS'", "'EXCLUDE'", + "'TIES'", "'OTHERS'", "'DO'", "'NOTHING'", + } + staticData.symbolicNames = []string{ + "", "SCOL", "DOT", "OPEN_PAR", "CLOSE_PAR", "COMMA", "ASSIGN", "STAR", + "PLUS", "MINUS", "TILDE", "PIPE2", "DIV", "MOD", "LT2", "GT2", "AMP", + "PIPE", "LT", "LT_EQ", "GT", "GT_EQ", "EQ", "NOT_EQ1", "NOT_EQ2", "ABORT_", + "ACTION_", "ADD_", "AFTER_", "ALL_", "ALTER_", "ANALYZE_", "AND_", "AS_", + "ASC_", "ATTACH_", "AUTOINCREMENT_", "BEFORE_", "BEGIN_", "BETWEEN_", + "BY_", "CASCADE_", "CASE_", "CAST_", "CHECK_", "COLLATE_", "COLUMN_", + "COMMIT_", "CONFLICT_", "CONSTRAINT_", "CREATE_", "CROSS_", "CURRENT_DATE_", + "CURRENT_TIME_", "CURRENT_TIMESTAMP_", "DATABASE_", "DEFAULT_", "DEFERRABLE_", + "DEFERRED_", "DELETE_", "DESC_", "DETACH_", "DISTINCT_", "DROP_", "EACH_", + "ELSE_", "END_", "ESCAPE_", "EXCEPT_", "EXCLUSIVE_", "EXISTS_", "EXPLAIN_", + "FAIL_", "FOR_", "FOREIGN_", "FROM_", "FULL_", "GLOB_", "GROUP_", "HAVING_", + "IF_", "IGNORE_", "IMMEDIATE_", "IN_", "INDEX_", "INDEXED_", "INITIALLY_", + "INNER_", "INSERT_", "INSTEAD_", "INTERSECT_", "INTO_", "IS_", "ISNULL_", + "JOIN_", "KEY_", "LEFT_", "LIKE_", "LIMIT_", "MATCH_", "NATURAL_", "NO_", + "NOT_", "NOTNULL_", "NULL_", "OF_", "OFFSET_", "ON_", "OR_", "ORDER_", + "OUTER_", "PLAN_", "PRAGMA_", "PRIMARY_", "QUERY_", "RAISE_", "RECURSIVE_", + "REFERENCES_", "REGEXP_", "REINDEX_", "RELEASE_", "RENAME_", "REPLACE_", + "RESTRICT_", "RETURNING_", "RIGHT_", "ROLLBACK_", "ROW_", "ROWS_", "SAVEPOINT_", + "SELECT_", "SET_", "TABLE_", "TEMP_", "TEMPORARY_", "THEN_", "TO_", + "TRANSACTION_", "TRIGGER_", "UNION_", "UNIQUE_", "UPDATE_", "USING_", + "VACUUM_", "VALUES_", "VIEW_", "VIRTUAL_", "WHEN_", "WHERE_", "WITH_", + "WITHOUT_", "FIRST_VALUE_", "OVER_", "PARTITION_", "RANGE_", "PRECEDING_", + "UNBOUNDED_", "CURRENT_", "FOLLOWING_", "CUME_DIST_", "DENSE_RANK_", + "LAG_", "LAST_VALUE_", "LEAD_", "NTH_VALUE_", "NTILE_", "PERCENT_RANK_", + "RANK_", "ROW_NUMBER_", "GENERATED_", "ALWAYS_", "STORED_", "TRUE_", + "FALSE_", "WINDOW_", "NULLS_", "FIRST_", "LAST_", "FILTER_", "GROUPS_", + "EXCLUDE_", "TIES_", "OTHERS_", "DO_", "NOTHING_", "IDENTIFIER", "NUMERIC_LITERAL", + "BIND_PARAMETER", "STRING_LITERAL", "BLOB_LITERAL", "SINGLE_LINE_COMMENT", + "MULTILINE_COMMENT", "SPACES", "UNEXPECTED_CHAR", + } + staticData.ruleNames = []string{ + "parse", "sql_stmt_list", "sql_stmt", "alter_table_stmt", "analyze_stmt", + "attach_stmt", "begin_stmt", "commit_stmt", "rollback_stmt", "savepoint_stmt", + "release_stmt", "create_index_stmt", "indexed_column", "create_table_stmt", + "column_def", "type_name", "column_constraint", "signed_number", "table_constraint", + "foreign_key_clause", "conflict_clause", "create_trigger_stmt", "create_view_stmt", + "create_virtual_table_stmt", "with_clause", "cte_table_name", "recursive_cte", + "common_table_expression", "delete_stmt", "delete_stmt_limited", "detach_stmt", + "drop_stmt", "expr", "raise_function", "literal_value", "value_row", + "values_clause", "insert_stmt", "returning_clause", "upsert_clause", + "pragma_stmt", "pragma_value", "reindex_stmt", "select_stmt", "join_clause", + 
"select_core", "factored_select_stmt", "simple_select_stmt", "compound_select_stmt", + "table_or_subquery", "result_column", "join_operator", "join_constraint", + "compound_operator", "update_stmt", "column_name_list", "update_stmt_limited", + "qualified_table_name", "vacuum_stmt", "filter_clause", "window_defn", + "over_clause", "frame_spec", "frame_clause", "simple_function_invocation", + "aggregate_function_invocation", "window_function_invocation", "common_table_stmt", + "order_by_stmt", "limit_stmt", "ordering_term", "asc_desc", "frame_left", + "frame_right", "frame_single", "window_function", "offset", "default_value", + "partition_by", "order_by_expr", "order_by_expr_asc_desc", "expr_asc_desc", + "initial_select", "recursive_select", "unary_operator", "error_message", + "module_argument", "column_alias", "keyword", "name", "function_name", + "schema_name", "table_name", "table_or_index_name", "column_name", "collation_name", + "foreign_table", "index_name", "trigger_name", "view_name", "module_name", + "pragma_name", "savepoint_name", "table_alias", "transaction_name", + "window_name", "alias", "filename", "base_window_name", "simple_func", + "aggregate_func", "table_function_name", "any_name", + } + staticData.predictionContextCache = antlr.NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 1, 193, 2056, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, + 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, + 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, + 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, + 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, + 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, + 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, + 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, + 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 2, + 47, 7, 47, 2, 48, 7, 48, 2, 49, 7, 49, 2, 50, 7, 50, 2, 51, 7, 51, 2, 52, + 7, 52, 2, 53, 7, 53, 2, 54, 7, 54, 2, 55, 7, 55, 2, 56, 7, 56, 2, 57, 7, + 57, 2, 58, 7, 58, 2, 59, 7, 59, 2, 60, 7, 60, 2, 61, 7, 61, 2, 62, 7, 62, + 2, 63, 7, 63, 2, 64, 7, 64, 2, 65, 7, 65, 2, 66, 7, 66, 2, 67, 7, 67, 2, + 68, 7, 68, 2, 69, 7, 69, 2, 70, 7, 70, 2, 71, 7, 71, 2, 72, 7, 72, 2, 73, + 7, 73, 2, 74, 7, 74, 2, 75, 7, 75, 2, 76, 7, 76, 2, 77, 7, 77, 2, 78, 7, + 78, 2, 79, 7, 79, 2, 80, 7, 80, 2, 81, 7, 81, 2, 82, 7, 82, 2, 83, 7, 83, + 2, 84, 7, 84, 2, 85, 7, 85, 2, 86, 7, 86, 2, 87, 7, 87, 2, 88, 7, 88, 2, + 89, 7, 89, 2, 90, 7, 90, 2, 91, 7, 91, 2, 92, 7, 92, 2, 93, 7, 93, 2, 94, + 7, 94, 2, 95, 7, 95, 2, 96, 7, 96, 2, 97, 7, 97, 2, 98, 7, 98, 2, 99, 7, + 99, 2, 100, 7, 100, 2, 101, 7, 101, 2, 102, 7, 102, 2, 103, 7, 103, 2, + 104, 7, 104, 2, 105, 7, 105, 2, 106, 7, 106, 2, 107, 7, 107, 2, 108, 7, + 108, 2, 109, 7, 109, 2, 110, 7, 110, 2, 111, 7, 111, 2, 112, 7, 112, 1, + 0, 5, 0, 228, 8, 0, 10, 0, 12, 0, 231, 9, 0, 1, 0, 1, 0, 1, 1, 5, 1, 236, + 8, 1, 10, 1, 12, 1, 239, 9, 1, 1, 1, 1, 1, 4, 1, 243, 8, 1, 11, 1, 12, + 1, 244, 1, 1, 5, 1, 248, 8, 1, 10, 1, 12, 1, 251, 9, 1, 1, 1, 5, 1, 254, + 8, 1, 10, 1, 12, 1, 257, 9, 1, 1, 2, 1, 2, 1, 2, 3, 2, 262, 8, 2, 3, 2, + 264, 8, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 1, 2, 3, 2, 290, 8, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 297, 8, + 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 304, 
8, 3, 1, 3, 1, 3, 1, 3, 1, + 3, 3, 3, 310, 8, 3, 1, 3, 1, 3, 3, 3, 314, 8, 3, 1, 3, 1, 3, 1, 3, 3, 3, + 319, 8, 3, 1, 3, 3, 3, 322, 8, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 3, 4, 329, + 8, 4, 1, 4, 3, 4, 332, 8, 4, 1, 5, 1, 5, 3, 5, 336, 8, 5, 1, 5, 1, 5, 1, + 5, 1, 5, 1, 6, 1, 6, 3, 6, 344, 8, 6, 1, 6, 1, 6, 3, 6, 348, 8, 6, 3, 6, + 350, 8, 6, 1, 7, 1, 7, 3, 7, 354, 8, 7, 1, 8, 1, 8, 3, 8, 358, 8, 8, 1, + 8, 1, 8, 3, 8, 362, 8, 8, 1, 8, 3, 8, 365, 8, 8, 1, 9, 1, 9, 1, 9, 1, 10, + 1, 10, 3, 10, 372, 8, 10, 1, 10, 1, 10, 1, 11, 1, 11, 3, 11, 378, 8, 11, + 1, 11, 1, 11, 1, 11, 1, 11, 3, 11, 384, 8, 11, 1, 11, 1, 11, 1, 11, 3, + 11, 389, 8, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, + 398, 8, 11, 10, 11, 12, 11, 401, 9, 11, 1, 11, 1, 11, 1, 11, 3, 11, 406, + 8, 11, 1, 12, 1, 12, 3, 12, 410, 8, 12, 1, 12, 1, 12, 3, 12, 414, 8, 12, + 1, 12, 3, 12, 417, 8, 12, 1, 13, 1, 13, 3, 13, 421, 8, 13, 1, 13, 1, 13, + 1, 13, 1, 13, 3, 13, 427, 8, 13, 1, 13, 1, 13, 1, 13, 3, 13, 432, 8, 13, + 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 439, 8, 13, 10, 13, 12, 13, 442, + 9, 13, 1, 13, 1, 13, 5, 13, 446, 8, 13, 10, 13, 12, 13, 449, 9, 13, 1, + 13, 1, 13, 1, 13, 3, 13, 454, 8, 13, 1, 13, 1, 13, 3, 13, 458, 8, 13, 1, + 14, 1, 14, 3, 14, 462, 8, 14, 1, 14, 5, 14, 465, 8, 14, 10, 14, 12, 14, + 468, 9, 14, 1, 15, 4, 15, 471, 8, 15, 11, 15, 12, 15, 472, 1, 15, 1, 15, + 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 485, 8, + 15, 1, 16, 1, 16, 3, 16, 489, 8, 16, 1, 16, 1, 16, 1, 16, 3, 16, 494, 8, + 16, 1, 16, 3, 16, 497, 8, 16, 1, 16, 3, 16, 500, 8, 16, 1, 16, 3, 16, 503, + 8, 16, 1, 16, 1, 16, 3, 16, 507, 8, 16, 1, 16, 3, 16, 510, 8, 16, 1, 16, + 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, + 16, 3, 16, 524, 8, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 3, 16, 531, 8, + 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 3, 16, 538, 8, 16, 3, 16, 540, 8, + 16, 1, 17, 3, 17, 543, 8, 17, 1, 17, 1, 17, 1, 18, 1, 18, 3, 18, 549, 8, + 18, 1, 18, 1, 18, 1, 18, 3, 18, 554, 8, 18, 1, 18, 1, 18, 1, 18, 1, 18, + 5, 18, 560, 8, 18, 10, 18, 12, 18, 563, 9, 18, 1, 18, 1, 18, 3, 18, 567, + 8, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, + 18, 1, 18, 5, 18, 580, 8, 18, 10, 18, 12, 18, 583, 9, 18, 1, 18, 1, 18, + 1, 18, 3, 18, 588, 8, 18, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 5, + 19, 596, 8, 19, 10, 19, 12, 19, 599, 9, 19, 1, 19, 1, 19, 3, 19, 603, 8, + 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 613, + 8, 19, 1, 19, 1, 19, 5, 19, 617, 8, 19, 10, 19, 12, 19, 620, 9, 19, 1, + 19, 3, 19, 623, 8, 19, 1, 19, 1, 19, 1, 19, 3, 19, 628, 8, 19, 3, 19, 630, + 8, 19, 1, 20, 1, 20, 1, 20, 1, 20, 1, 21, 1, 21, 3, 21, 638, 8, 21, 1, + 21, 1, 21, 1, 21, 1, 21, 3, 21, 644, 8, 21, 1, 21, 1, 21, 1, 21, 3, 21, + 649, 8, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 656, 8, 21, 1, 21, + 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 1, 21, 5, 21, 665, 8, 21, 10, 21, 12, + 21, 668, 9, 21, 3, 21, 670, 8, 21, 3, 21, 672, 8, 21, 1, 21, 1, 21, 1, + 21, 1, 21, 1, 21, 3, 21, 679, 8, 21, 1, 21, 1, 21, 3, 21, 683, 8, 21, 1, + 21, 1, 21, 1, 21, 1, 21, 1, 21, 3, 21, 690, 8, 21, 1, 21, 1, 21, 4, 21, + 694, 8, 21, 11, 21, 12, 21, 695, 1, 21, 1, 21, 1, 22, 1, 22, 3, 22, 702, + 8, 22, 1, 22, 1, 22, 1, 22, 1, 22, 3, 22, 708, 8, 22, 1, 22, 1, 22, 1, + 22, 3, 22, 713, 8, 22, 1, 22, 1, 22, 1, 22, 1, 22, 1, 22, 5, 22, 720, 8, + 22, 10, 22, 12, 22, 723, 9, 22, 1, 22, 1, 22, 3, 22, 727, 8, 22, 1, 22, + 1, 22, 1, 22, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 1, 23, 3, 23, 738, 8, + 23, 1, 23, 1, 23, 
1, 23, 3, 23, 743, 8, 23, 1, 23, 1, 23, 1, 23, 1, 23, + 1, 23, 1, 23, 1, 23, 5, 23, 752, 8, 23, 10, 23, 12, 23, 755, 9, 23, 1, + 23, 1, 23, 3, 23, 759, 8, 23, 1, 24, 1, 24, 3, 24, 763, 8, 24, 1, 24, 1, + 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, 1, 24, + 5, 24, 777, 8, 24, 10, 24, 12, 24, 780, 9, 24, 1, 25, 1, 25, 1, 25, 1, + 25, 1, 25, 5, 25, 787, 8, 25, 10, 25, 12, 25, 790, 9, 25, 1, 25, 1, 25, + 3, 25, 794, 8, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 3, 26, 802, + 8, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 5, 27, 812, + 8, 27, 10, 27, 12, 27, 815, 9, 27, 1, 27, 1, 27, 3, 27, 819, 8, 27, 1, + 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 3, 28, 827, 8, 28, 1, 28, 1, 28, + 1, 28, 1, 28, 1, 28, 3, 28, 834, 8, 28, 1, 28, 3, 28, 837, 8, 28, 1, 29, + 3, 29, 840, 8, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 3, 29, 847, 8, 29, + 1, 29, 3, 29, 850, 8, 29, 1, 29, 3, 29, 853, 8, 29, 1, 29, 3, 29, 856, + 8, 29, 1, 30, 1, 30, 3, 30, 860, 8, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, + 31, 1, 31, 3, 31, 868, 8, 31, 1, 31, 1, 31, 1, 31, 3, 31, 873, 8, 31, 1, + 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 883, 8, 32, + 1, 32, 1, 32, 1, 32, 3, 32, 888, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, + 32, 1, 32, 1, 32, 3, 32, 897, 8, 32, 1, 32, 1, 32, 1, 32, 5, 32, 902, 8, + 32, 10, 32, 12, 32, 905, 9, 32, 1, 32, 3, 32, 908, 8, 32, 1, 32, 1, 32, + 3, 32, 912, 8, 32, 1, 32, 3, 32, 915, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, + 5, 32, 921, 8, 32, 10, 32, 12, 32, 924, 9, 32, 1, 32, 1, 32, 1, 32, 1, + 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 936, 8, 32, 1, 32, + 3, 32, 939, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 947, + 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 4, 32, 954, 8, 32, 11, 32, 12, + 32, 955, 1, 32, 1, 32, 3, 32, 960, 8, 32, 1, 32, 1, 32, 1, 32, 3, 32, 965, + 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, + 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, + 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 995, 8, + 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, + 3, 32, 1007, 8, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1012, 8, 32, 1, 32, 1, + 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1024, + 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1030, 8, 32, 1, 32, 1, 32, 1, + 32, 1, 32, 1, 32, 3, 32, 1037, 8, 32, 1, 32, 1, 32, 3, 32, 1041, 8, 32, + 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 5, 32, 1049, 8, 32, 10, 32, 12, + 32, 1052, 9, 32, 3, 32, 1054, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, + 1060, 8, 32, 1, 32, 1, 32, 1, 32, 1, 32, 3, 32, 1066, 8, 32, 1, 32, 1, + 32, 1, 32, 1, 32, 1, 32, 5, 32, 1073, 8, 32, 10, 32, 12, 32, 1076, 9, 32, + 3, 32, 1078, 8, 32, 1, 32, 1, 32, 3, 32, 1082, 8, 32, 5, 32, 1084, 8, 32, + 10, 32, 12, 32, 1087, 9, 32, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, 1, 33, + 3, 33, 1095, 8, 33, 1, 33, 1, 33, 1, 34, 1, 34, 1, 35, 1, 35, 1, 35, 1, + 35, 5, 35, 1105, 8, 35, 10, 35, 12, 35, 1108, 9, 35, 1, 35, 1, 35, 1, 36, + 1, 36, 1, 36, 1, 36, 5, 36, 1116, 8, 36, 10, 36, 12, 36, 1119, 9, 36, 1, + 37, 3, 37, 1122, 8, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 3, 37, 1129, + 8, 37, 1, 37, 1, 37, 1, 37, 1, 37, 3, 37, 1135, 8, 37, 1, 37, 1, 37, 1, + 37, 3, 37, 1140, 8, 37, 1, 37, 1, 37, 1, 37, 1, 37, 5, 37, 1146, 8, 37, + 10, 37, 12, 37, 1149, 9, 37, 1, 37, 1, 37, 3, 37, 1153, 8, 37, 1, 37, 1, + 37, 3, 37, 1157, 8, 37, 1, 37, 3, 37, 1160, 8, 37, 1, 37, 1, 37, 3, 37, + 1164, 8, 37, 1, 37, 3, 37, 1167, 8, 37, 1, 38, 1, 38, 1, 38, 1, 38, 5, + 
38, 1173, 8, 38, 10, 38, 12, 38, 1176, 9, 38, 1, 39, 1, 39, 1, 39, 1, 39, + 1, 39, 1, 39, 5, 39, 1184, 8, 39, 10, 39, 12, 39, 1187, 9, 39, 1, 39, 1, + 39, 1, 39, 3, 39, 1192, 8, 39, 3, 39, 1194, 8, 39, 1, 39, 1, 39, 1, 39, + 1, 39, 1, 39, 1, 39, 3, 39, 1202, 8, 39, 1, 39, 1, 39, 1, 39, 1, 39, 1, + 39, 3, 39, 1209, 8, 39, 1, 39, 1, 39, 1, 39, 5, 39, 1214, 8, 39, 10, 39, + 12, 39, 1217, 9, 39, 1, 39, 1, 39, 3, 39, 1221, 8, 39, 3, 39, 1223, 8, + 39, 1, 40, 1, 40, 1, 40, 1, 40, 3, 40, 1229, 8, 40, 1, 40, 1, 40, 1, 40, + 1, 40, 1, 40, 1, 40, 1, 40, 3, 40, 1238, 8, 40, 1, 41, 1, 41, 1, 41, 3, + 41, 1243, 8, 41, 1, 42, 1, 42, 1, 42, 1, 42, 1, 42, 3, 42, 1250, 8, 42, + 1, 42, 1, 42, 3, 42, 1254, 8, 42, 3, 42, 1256, 8, 42, 1, 43, 3, 43, 1259, + 8, 43, 1, 43, 1, 43, 1, 43, 1, 43, 5, 43, 1265, 8, 43, 10, 43, 12, 43, + 1268, 9, 43, 1, 43, 3, 43, 1271, 8, 43, 1, 43, 3, 43, 1274, 8, 43, 1, 44, + 1, 44, 1, 44, 1, 44, 3, 44, 1280, 8, 44, 5, 44, 1282, 8, 44, 10, 44, 12, + 44, 1285, 9, 44, 1, 45, 1, 45, 3, 45, 1289, 8, 45, 1, 45, 1, 45, 1, 45, + 5, 45, 1294, 8, 45, 10, 45, 12, 45, 1297, 9, 45, 1, 45, 1, 45, 1, 45, 1, + 45, 5, 45, 1303, 8, 45, 10, 45, 12, 45, 1306, 9, 45, 1, 45, 3, 45, 1309, + 8, 45, 3, 45, 1311, 8, 45, 1, 45, 1, 45, 3, 45, 1315, 8, 45, 1, 45, 1, + 45, 1, 45, 1, 45, 1, 45, 5, 45, 1322, 8, 45, 10, 45, 12, 45, 1325, 9, 45, + 1, 45, 1, 45, 3, 45, 1329, 8, 45, 3, 45, 1331, 8, 45, 1, 45, 1, 45, 1, + 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 1, 45, 5, 45, 1342, 8, 45, 10, 45, + 12, 45, 1345, 9, 45, 3, 45, 1347, 8, 45, 1, 45, 3, 45, 1350, 8, 45, 1, + 46, 1, 46, 1, 47, 3, 47, 1355, 8, 47, 1, 47, 1, 47, 3, 47, 1359, 8, 47, + 1, 47, 3, 47, 1362, 8, 47, 1, 48, 3, 48, 1365, 8, 48, 1, 48, 1, 48, 1, + 48, 3, 48, 1370, 8, 48, 1, 48, 1, 48, 3, 48, 1374, 8, 48, 1, 48, 4, 48, + 1377, 8, 48, 11, 48, 12, 48, 1378, 1, 48, 3, 48, 1382, 8, 48, 1, 48, 3, + 48, 1385, 8, 48, 1, 49, 1, 49, 1, 49, 3, 49, 1390, 8, 49, 1, 49, 1, 49, + 3, 49, 1394, 8, 49, 1, 49, 3, 49, 1397, 8, 49, 1, 49, 1, 49, 1, 49, 1, + 49, 1, 49, 3, 49, 1404, 8, 49, 1, 49, 1, 49, 1, 49, 3, 49, 1409, 8, 49, + 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 5, 49, 1416, 8, 49, 10, 49, 12, 49, + 1419, 9, 49, 1, 49, 1, 49, 3, 49, 1423, 8, 49, 1, 49, 3, 49, 1426, 8, 49, + 1, 49, 1, 49, 1, 49, 1, 49, 5, 49, 1432, 8, 49, 10, 49, 12, 49, 1435, 9, + 49, 1, 49, 3, 49, 1438, 8, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, 1, 49, + 3, 49, 1446, 8, 49, 1, 49, 3, 49, 1449, 8, 49, 3, 49, 1451, 8, 49, 1, 50, + 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 1, 50, 3, 50, 1460, 8, 50, 1, 50, 3, + 50, 1463, 8, 50, 3, 50, 1465, 8, 50, 1, 51, 1, 51, 3, 51, 1469, 8, 51, + 1, 51, 1, 51, 3, 51, 1473, 8, 51, 1, 51, 1, 51, 3, 51, 1477, 8, 51, 1, + 51, 3, 51, 1480, 8, 51, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, 1, 52, + 5, 52, 1489, 8, 52, 10, 52, 12, 52, 1492, 9, 52, 1, 52, 1, 52, 3, 52, 1496, + 8, 52, 1, 53, 1, 53, 3, 53, 1500, 8, 53, 1, 53, 1, 53, 3, 53, 1504, 8, + 53, 1, 54, 3, 54, 1507, 8, 54, 1, 54, 1, 54, 1, 54, 3, 54, 1512, 8, 54, + 1, 54, 1, 54, 1, 54, 1, 54, 3, 54, 1518, 8, 54, 1, 54, 1, 54, 1, 54, 1, + 54, 1, 54, 3, 54, 1525, 8, 54, 1, 54, 1, 54, 1, 54, 5, 54, 1530, 8, 54, + 10, 54, 12, 54, 1533, 9, 54, 1, 54, 1, 54, 1, 54, 1, 54, 5, 54, 1539, 8, + 54, 10, 54, 12, 54, 1542, 9, 54, 1, 54, 3, 54, 1545, 8, 54, 3, 54, 1547, + 8, 54, 1, 54, 1, 54, 3, 54, 1551, 8, 54, 1, 54, 3, 54, 1554, 8, 54, 1, + 55, 1, 55, 1, 55, 1, 55, 5, 55, 1560, 8, 55, 10, 55, 12, 55, 1563, 9, 55, + 1, 55, 1, 55, 1, 56, 3, 56, 1568, 8, 56, 1, 56, 1, 56, 1, 56, 3, 56, 1573, + 8, 56, 1, 56, 1, 56, 1, 56, 1, 56, 3, 56, 1579, 8, 56, 
1, 56, 1, 56, 1, + 56, 1, 56, 1, 56, 3, 56, 1586, 8, 56, 1, 56, 1, 56, 1, 56, 5, 56, 1591, + 8, 56, 10, 56, 12, 56, 1594, 9, 56, 1, 56, 1, 56, 3, 56, 1598, 8, 56, 1, + 56, 3, 56, 1601, 8, 56, 1, 56, 3, 56, 1604, 8, 56, 1, 56, 3, 56, 1607, + 8, 56, 1, 57, 1, 57, 1, 57, 3, 57, 1612, 8, 57, 1, 57, 1, 57, 1, 57, 3, + 57, 1617, 8, 57, 1, 57, 1, 57, 1, 57, 1, 57, 1, 57, 3, 57, 1624, 8, 57, + 1, 58, 1, 58, 3, 58, 1628, 8, 58, 1, 58, 1, 58, 3, 58, 1632, 8, 58, 1, + 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 59, 1, 60, 1, 60, 3, 60, 1642, 8, 60, + 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 5, 60, 1649, 8, 60, 10, 60, 12, 60, + 1652, 9, 60, 3, 60, 1654, 8, 60, 1, 60, 1, 60, 1, 60, 1, 60, 1, 60, 5, + 60, 1661, 8, 60, 10, 60, 12, 60, 1664, 9, 60, 1, 60, 3, 60, 1667, 8, 60, + 1, 60, 1, 60, 1, 61, 1, 61, 1, 61, 1, 61, 3, 61, 1675, 8, 61, 1, 61, 1, + 61, 1, 61, 1, 61, 1, 61, 5, 61, 1682, 8, 61, 10, 61, 12, 61, 1685, 9, 61, + 3, 61, 1687, 8, 61, 1, 61, 1, 61, 1, 61, 1, 61, 1, 61, 5, 61, 1694, 8, + 61, 10, 61, 12, 61, 1697, 9, 61, 3, 61, 1699, 8, 61, 1, 61, 3, 61, 1702, + 8, 61, 1, 61, 3, 61, 1705, 8, 61, 1, 62, 1, 62, 1, 62, 1, 62, 1, 62, 1, + 62, 1, 62, 1, 62, 3, 62, 1715, 8, 62, 3, 62, 1717, 8, 62, 1, 63, 1, 63, + 1, 63, 1, 63, 1, 63, 1, 63, 1, 63, 3, 63, 1726, 8, 63, 1, 64, 1, 64, 1, + 64, 1, 64, 1, 64, 5, 64, 1733, 8, 64, 10, 64, 12, 64, 1736, 9, 64, 1, 64, + 3, 64, 1739, 8, 64, 1, 64, 1, 64, 1, 65, 1, 65, 1, 65, 3, 65, 1746, 8, + 65, 1, 65, 1, 65, 1, 65, 5, 65, 1751, 8, 65, 10, 65, 12, 65, 1754, 9, 65, + 1, 65, 3, 65, 1757, 8, 65, 1, 65, 1, 65, 3, 65, 1761, 8, 65, 1, 66, 1, + 66, 1, 66, 1, 66, 1, 66, 5, 66, 1768, 8, 66, 10, 66, 12, 66, 1771, 9, 66, + 1, 66, 3, 66, 1774, 8, 66, 1, 66, 1, 66, 3, 66, 1778, 8, 66, 1, 66, 1, + 66, 1, 66, 3, 66, 1783, 8, 66, 1, 67, 1, 67, 3, 67, 1787, 8, 67, 1, 67, + 1, 67, 1, 67, 5, 67, 1792, 8, 67, 10, 67, 12, 67, 1795, 9, 67, 1, 68, 1, + 68, 1, 68, 1, 68, 1, 68, 5, 68, 1802, 8, 68, 10, 68, 12, 68, 1805, 9, 68, + 1, 69, 1, 69, 1, 69, 1, 69, 3, 69, 1811, 8, 69, 1, 70, 1, 70, 1, 70, 3, + 70, 1816, 8, 70, 1, 70, 3, 70, 1819, 8, 70, 1, 70, 1, 70, 3, 70, 1823, + 8, 70, 1, 71, 1, 71, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, 72, 1, + 72, 1, 72, 1, 72, 3, 72, 1837, 8, 72, 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, + 1, 73, 1, 73, 1, 73, 1, 73, 1, 73, 3, 73, 1849, 8, 73, 1, 74, 1, 74, 1, + 74, 1, 74, 1, 74, 1, 74, 1, 74, 3, 74, 1858, 8, 74, 1, 75, 1, 75, 1, 75, + 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1867, 8, 75, 1, 75, 1, 75, 3, 75, 1871, + 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1881, + 8, 75, 1, 75, 3, 75, 1884, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, + 75, 1, 75, 3, 75, 1893, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, + 1, 75, 3, 75, 1902, 8, 75, 1, 75, 3, 75, 1905, 8, 75, 1, 75, 1, 75, 1, + 75, 1, 75, 3, 75, 1911, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, + 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1925, 8, 75, 1, 75, 1, + 75, 3, 75, 1929, 8, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, 1, 75, + 1, 75, 1, 75, 3, 75, 1940, 8, 75, 1, 75, 1, 75, 1, 75, 3, 75, 1945, 8, + 75, 1, 76, 1, 76, 1, 76, 1, 77, 1, 77, 1, 77, 1, 78, 1, 78, 1, 78, 4, 78, + 1956, 8, 78, 11, 78, 12, 78, 1957, 1, 79, 1, 79, 1, 79, 4, 79, 1963, 8, + 79, 11, 79, 12, 79, 1964, 1, 80, 1, 80, 1, 80, 1, 80, 1, 81, 1, 81, 3, + 81, 1973, 8, 81, 1, 81, 1, 81, 1, 81, 3, 81, 1978, 8, 81, 5, 81, 1980, + 8, 81, 10, 81, 12, 81, 1983, 9, 81, 1, 82, 1, 82, 1, 83, 1, 83, 1, 84, + 1, 84, 1, 85, 1, 85, 1, 86, 1, 86, 3, 86, 1995, 8, 86, 1, 87, 1, 87, 1, + 88, 1, 88, 1, 89, 1, 89, 1, 90, 1, 90, 1, 
91, 1, 91, 1, 92, 1, 92, 1, 93, + 1, 93, 1, 94, 1, 94, 1, 95, 1, 95, 1, 96, 1, 96, 1, 97, 1, 97, 1, 98, 1, + 98, 1, 99, 1, 99, 1, 100, 1, 100, 1, 101, 1, 101, 1, 102, 1, 102, 1, 103, + 1, 103, 1, 104, 1, 104, 1, 105, 1, 105, 1, 106, 1, 106, 1, 107, 1, 107, + 1, 108, 1, 108, 1, 109, 1, 109, 1, 110, 1, 110, 1, 111, 1, 111, 1, 112, + 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 1, 112, 3, 112, 2054, 8, 112, 1, + 112, 2, 440, 472, 1, 64, 113, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, + 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, + 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, + 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, + 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, + 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, + 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, + 216, 218, 220, 222, 224, 0, 28, 3, 0, 58, 58, 69, 69, 82, 82, 2, 0, 47, + 47, 66, 66, 1, 0, 133, 134, 2, 0, 146, 146, 171, 171, 1, 0, 8, 9, 2, 0, + 59, 59, 141, 141, 2, 0, 56, 56, 104, 104, 2, 0, 58, 58, 82, 82, 5, 0, 25, + 25, 72, 72, 81, 81, 122, 122, 126, 126, 4, 0, 84, 84, 132, 132, 138, 138, + 145, 145, 2, 0, 7, 7, 12, 13, 1, 0, 14, 17, 1, 0, 18, 21, 4, 0, 77, 77, + 97, 97, 99, 99, 118, 118, 3, 0, 25, 25, 72, 72, 126, 126, 5, 0, 52, 54, + 104, 104, 172, 173, 186, 186, 188, 189, 2, 0, 29, 29, 62, 62, 3, 0, 128, + 128, 154, 154, 179, 179, 2, 0, 5, 5, 106, 106, 1, 0, 176, 177, 2, 0, 34, + 34, 60, 60, 2, 0, 151, 151, 162, 162, 2, 0, 159, 159, 166, 166, 2, 0, 160, + 160, 167, 168, 2, 0, 161, 161, 163, 163, 2, 0, 8, 10, 102, 102, 2, 0, 185, + 185, 188, 188, 2, 0, 25, 123, 125, 180, 2338, 0, 229, 1, 0, 0, 0, 2, 237, + 1, 0, 0, 0, 4, 263, 1, 0, 0, 0, 6, 291, 1, 0, 0, 0, 8, 323, 1, 0, 0, 0, + 10, 333, 1, 0, 0, 0, 12, 341, 1, 0, 0, 0, 14, 351, 1, 0, 0, 0, 16, 355, + 1, 0, 0, 0, 18, 366, 1, 0, 0, 0, 20, 369, 1, 0, 0, 0, 22, 375, 1, 0, 0, + 0, 24, 409, 1, 0, 0, 0, 26, 418, 1, 0, 0, 0, 28, 459, 1, 0, 0, 0, 30, 470, + 1, 0, 0, 0, 32, 488, 1, 0, 0, 0, 34, 542, 1, 0, 0, 0, 36, 548, 1, 0, 0, + 0, 38, 589, 1, 0, 0, 0, 40, 631, 1, 0, 0, 0, 42, 635, 1, 0, 0, 0, 44, 699, + 1, 0, 0, 0, 46, 731, 1, 0, 0, 0, 48, 760, 1, 0, 0, 0, 50, 781, 1, 0, 0, + 0, 52, 795, 1, 0, 0, 0, 54, 806, 1, 0, 0, 0, 56, 826, 1, 0, 0, 0, 58, 839, + 1, 0, 0, 0, 60, 857, 1, 0, 0, 0, 62, 863, 1, 0, 0, 0, 64, 964, 1, 0, 0, + 0, 66, 1088, 1, 0, 0, 0, 68, 1098, 1, 0, 0, 0, 70, 1100, 1, 0, 0, 0, 72, + 1111, 1, 0, 0, 0, 74, 1121, 1, 0, 0, 0, 76, 1168, 1, 0, 0, 0, 78, 1177, + 1, 0, 0, 0, 80, 1224, 1, 0, 0, 0, 82, 1242, 1, 0, 0, 0, 84, 1244, 1, 0, + 0, 0, 86, 1258, 1, 0, 0, 0, 88, 1275, 1, 0, 0, 0, 90, 1349, 1, 0, 0, 0, + 92, 1351, 1, 0, 0, 0, 94, 1354, 1, 0, 0, 0, 96, 1364, 1, 0, 0, 0, 98, 1450, + 1, 0, 0, 0, 100, 1464, 1, 0, 0, 0, 102, 1479, 1, 0, 0, 0, 104, 1495, 1, + 0, 0, 0, 106, 1503, 1, 0, 0, 0, 108, 1506, 1, 0, 0, 0, 110, 1555, 1, 0, + 0, 0, 112, 1567, 1, 0, 0, 0, 114, 1611, 1, 0, 0, 0, 116, 1625, 1, 0, 0, + 0, 118, 1633, 1, 0, 0, 0, 120, 1639, 1, 0, 0, 0, 122, 1670, 1, 0, 0, 0, + 124, 1706, 1, 0, 0, 0, 126, 1718, 1, 0, 0, 0, 128, 1727, 1, 0, 0, 0, 130, + 1742, 1, 0, 0, 0, 132, 1762, 1, 0, 0, 0, 134, 1784, 1, 0, 0, 0, 136, 1796, + 1, 0, 0, 0, 138, 1806, 1, 0, 0, 0, 140, 1812, 1, 0, 0, 0, 142, 1824, 1, + 0, 0, 0, 144, 1836, 1, 0, 0, 0, 146, 1848, 1, 0, 0, 0, 148, 1857, 1, 0, + 0, 0, 150, 1944, 1, 0, 0, 0, 152, 1946, 1, 0, 0, 0, 154, 1949, 1, 0, 0, + 0, 156, 1952, 1, 0, 0, 0, 158, 1959, 1, 0, 0, 0, 160, 1966, 1, 0, 0, 
0, + 162, 1970, 1, 0, 0, 0, 164, 1984, 1, 0, 0, 0, 166, 1986, 1, 0, 0, 0, 168, + 1988, 1, 0, 0, 0, 170, 1990, 1, 0, 0, 0, 172, 1994, 1, 0, 0, 0, 174, 1996, + 1, 0, 0, 0, 176, 1998, 1, 0, 0, 0, 178, 2000, 1, 0, 0, 0, 180, 2002, 1, + 0, 0, 0, 182, 2004, 1, 0, 0, 0, 184, 2006, 1, 0, 0, 0, 186, 2008, 1, 0, + 0, 0, 188, 2010, 1, 0, 0, 0, 190, 2012, 1, 0, 0, 0, 192, 2014, 1, 0, 0, + 0, 194, 2016, 1, 0, 0, 0, 196, 2018, 1, 0, 0, 0, 198, 2020, 1, 0, 0, 0, + 200, 2022, 1, 0, 0, 0, 202, 2024, 1, 0, 0, 0, 204, 2026, 1, 0, 0, 0, 206, + 2028, 1, 0, 0, 0, 208, 2030, 1, 0, 0, 0, 210, 2032, 1, 0, 0, 0, 212, 2034, + 1, 0, 0, 0, 214, 2036, 1, 0, 0, 0, 216, 2038, 1, 0, 0, 0, 218, 2040, 1, + 0, 0, 0, 220, 2042, 1, 0, 0, 0, 222, 2044, 1, 0, 0, 0, 224, 2053, 1, 0, + 0, 0, 226, 228, 3, 2, 1, 0, 227, 226, 1, 0, 0, 0, 228, 231, 1, 0, 0, 0, + 229, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 232, 1, 0, 0, 0, 231, + 229, 1, 0, 0, 0, 232, 233, 5, 0, 0, 1, 233, 1, 1, 0, 0, 0, 234, 236, 5, + 1, 0, 0, 235, 234, 1, 0, 0, 0, 236, 239, 1, 0, 0, 0, 237, 235, 1, 0, 0, + 0, 237, 238, 1, 0, 0, 0, 238, 240, 1, 0, 0, 0, 239, 237, 1, 0, 0, 0, 240, + 249, 3, 4, 2, 0, 241, 243, 5, 1, 0, 0, 242, 241, 1, 0, 0, 0, 243, 244, + 1, 0, 0, 0, 244, 242, 1, 0, 0, 0, 244, 245, 1, 0, 0, 0, 245, 246, 1, 0, + 0, 0, 246, 248, 3, 4, 2, 0, 247, 242, 1, 0, 0, 0, 248, 251, 1, 0, 0, 0, + 249, 247, 1, 0, 0, 0, 249, 250, 1, 0, 0, 0, 250, 255, 1, 0, 0, 0, 251, + 249, 1, 0, 0, 0, 252, 254, 5, 1, 0, 0, 253, 252, 1, 0, 0, 0, 254, 257, + 1, 0, 0, 0, 255, 253, 1, 0, 0, 0, 255, 256, 1, 0, 0, 0, 256, 3, 1, 0, 0, + 0, 257, 255, 1, 0, 0, 0, 258, 261, 5, 71, 0, 0, 259, 260, 5, 114, 0, 0, + 260, 262, 5, 111, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, + 264, 1, 0, 0, 0, 263, 258, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 289, + 1, 0, 0, 0, 265, 290, 3, 6, 3, 0, 266, 290, 3, 8, 4, 0, 267, 290, 3, 10, + 5, 0, 268, 290, 3, 12, 6, 0, 269, 290, 3, 14, 7, 0, 270, 290, 3, 22, 11, + 0, 271, 290, 3, 26, 13, 0, 272, 290, 3, 42, 21, 0, 273, 290, 3, 44, 22, + 0, 274, 290, 3, 46, 23, 0, 275, 290, 3, 56, 28, 0, 276, 290, 3, 58, 29, + 0, 277, 290, 3, 60, 30, 0, 278, 290, 3, 62, 31, 0, 279, 290, 3, 74, 37, + 0, 280, 290, 3, 80, 40, 0, 281, 290, 3, 84, 42, 0, 282, 290, 3, 20, 10, + 0, 283, 290, 3, 16, 8, 0, 284, 290, 3, 18, 9, 0, 285, 290, 3, 86, 43, 0, + 286, 290, 3, 108, 54, 0, 287, 290, 3, 112, 56, 0, 288, 290, 3, 116, 58, + 0, 289, 265, 1, 0, 0, 0, 289, 266, 1, 0, 0, 0, 289, 267, 1, 0, 0, 0, 289, + 268, 1, 0, 0, 0, 289, 269, 1, 0, 0, 0, 289, 270, 1, 0, 0, 0, 289, 271, + 1, 0, 0, 0, 289, 272, 1, 0, 0, 0, 289, 273, 1, 0, 0, 0, 289, 274, 1, 0, + 0, 0, 289, 275, 1, 0, 0, 0, 289, 276, 1, 0, 0, 0, 289, 277, 1, 0, 0, 0, + 289, 278, 1, 0, 0, 0, 289, 279, 1, 0, 0, 0, 289, 280, 1, 0, 0, 0, 289, + 281, 1, 0, 0, 0, 289, 282, 1, 0, 0, 0, 289, 283, 1, 0, 0, 0, 289, 284, + 1, 0, 0, 0, 289, 285, 1, 0, 0, 0, 289, 286, 1, 0, 0, 0, 289, 287, 1, 0, + 0, 0, 289, 288, 1, 0, 0, 0, 290, 5, 1, 0, 0, 0, 291, 292, 5, 30, 0, 0, + 292, 296, 5, 132, 0, 0, 293, 294, 3, 182, 91, 0, 294, 295, 5, 2, 0, 0, + 295, 297, 1, 0, 0, 0, 296, 293, 1, 0, 0, 0, 296, 297, 1, 0, 0, 0, 297, + 298, 1, 0, 0, 0, 298, 321, 3, 184, 92, 0, 299, 309, 5, 121, 0, 0, 300, + 301, 5, 136, 0, 0, 301, 310, 3, 184, 92, 0, 302, 304, 5, 46, 0, 0, 303, + 302, 1, 0, 0, 0, 303, 304, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, + 3, 188, 94, 0, 306, 307, 5, 136, 0, 0, 307, 308, 3, 188, 94, 0, 308, 310, + 1, 0, 0, 0, 309, 300, 1, 0, 0, 0, 309, 303, 1, 0, 0, 0, 310, 322, 1, 0, + 0, 0, 311, 313, 5, 27, 0, 0, 312, 314, 5, 46, 0, 0, 313, 312, 
1, 0, 0, + 0, 313, 314, 1, 0, 0, 0, 314, 315, 1, 0, 0, 0, 315, 322, 3, 28, 14, 0, + 316, 318, 5, 63, 0, 0, 317, 319, 5, 46, 0, 0, 318, 317, 1, 0, 0, 0, 318, + 319, 1, 0, 0, 0, 319, 320, 1, 0, 0, 0, 320, 322, 3, 188, 94, 0, 321, 299, + 1, 0, 0, 0, 321, 311, 1, 0, 0, 0, 321, 316, 1, 0, 0, 0, 322, 7, 1, 0, 0, + 0, 323, 331, 5, 31, 0, 0, 324, 332, 3, 182, 91, 0, 325, 326, 3, 182, 91, + 0, 326, 327, 5, 2, 0, 0, 327, 329, 1, 0, 0, 0, 328, 325, 1, 0, 0, 0, 328, + 329, 1, 0, 0, 0, 329, 330, 1, 0, 0, 0, 330, 332, 3, 186, 93, 0, 331, 324, + 1, 0, 0, 0, 331, 328, 1, 0, 0, 0, 331, 332, 1, 0, 0, 0, 332, 9, 1, 0, 0, + 0, 333, 335, 5, 35, 0, 0, 334, 336, 5, 55, 0, 0, 335, 334, 1, 0, 0, 0, + 335, 336, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 337, 338, 3, 64, 32, 0, 338, + 339, 5, 33, 0, 0, 339, 340, 3, 182, 91, 0, 340, 11, 1, 0, 0, 0, 341, 343, + 5, 38, 0, 0, 342, 344, 7, 0, 0, 0, 343, 342, 1, 0, 0, 0, 343, 344, 1, 0, + 0, 0, 344, 349, 1, 0, 0, 0, 345, 347, 5, 137, 0, 0, 346, 348, 3, 208, 104, + 0, 347, 346, 1, 0, 0, 0, 347, 348, 1, 0, 0, 0, 348, 350, 1, 0, 0, 0, 349, + 345, 1, 0, 0, 0, 349, 350, 1, 0, 0, 0, 350, 13, 1, 0, 0, 0, 351, 353, 7, + 1, 0, 0, 352, 354, 5, 137, 0, 0, 353, 352, 1, 0, 0, 0, 353, 354, 1, 0, + 0, 0, 354, 15, 1, 0, 0, 0, 355, 357, 5, 126, 0, 0, 356, 358, 5, 137, 0, + 0, 357, 356, 1, 0, 0, 0, 357, 358, 1, 0, 0, 0, 358, 364, 1, 0, 0, 0, 359, + 361, 5, 136, 0, 0, 360, 362, 5, 129, 0, 0, 361, 360, 1, 0, 0, 0, 361, 362, + 1, 0, 0, 0, 362, 363, 1, 0, 0, 0, 363, 365, 3, 204, 102, 0, 364, 359, 1, + 0, 0, 0, 364, 365, 1, 0, 0, 0, 365, 17, 1, 0, 0, 0, 366, 367, 5, 129, 0, + 0, 367, 368, 3, 204, 102, 0, 368, 19, 1, 0, 0, 0, 369, 371, 5, 120, 0, + 0, 370, 372, 5, 129, 0, 0, 371, 370, 1, 0, 0, 0, 371, 372, 1, 0, 0, 0, + 372, 373, 1, 0, 0, 0, 373, 374, 3, 204, 102, 0, 374, 21, 1, 0, 0, 0, 375, + 377, 5, 50, 0, 0, 376, 378, 5, 140, 0, 0, 377, 376, 1, 0, 0, 0, 377, 378, + 1, 0, 0, 0, 378, 379, 1, 0, 0, 0, 379, 383, 5, 84, 0, 0, 380, 381, 5, 80, + 0, 0, 381, 382, 5, 102, 0, 0, 382, 384, 5, 70, 0, 0, 383, 380, 1, 0, 0, + 0, 383, 384, 1, 0, 0, 0, 384, 388, 1, 0, 0, 0, 385, 386, 3, 182, 91, 0, + 386, 387, 5, 2, 0, 0, 387, 389, 1, 0, 0, 0, 388, 385, 1, 0, 0, 0, 388, + 389, 1, 0, 0, 0, 389, 390, 1, 0, 0, 0, 390, 391, 3, 194, 97, 0, 391, 392, + 5, 107, 0, 0, 392, 393, 3, 184, 92, 0, 393, 394, 5, 3, 0, 0, 394, 399, + 3, 24, 12, 0, 395, 396, 5, 5, 0, 0, 396, 398, 3, 24, 12, 0, 397, 395, 1, + 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 397, 1, 0, 0, 0, 399, 400, 1, 0, 0, + 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, 405, 5, 4, 0, 0, 403, + 404, 5, 148, 0, 0, 404, 406, 3, 64, 32, 0, 405, 403, 1, 0, 0, 0, 405, 406, + 1, 0, 0, 0, 406, 23, 1, 0, 0, 0, 407, 410, 3, 188, 94, 0, 408, 410, 3, + 64, 32, 0, 409, 407, 1, 0, 0, 0, 409, 408, 1, 0, 0, 0, 410, 413, 1, 0, + 0, 0, 411, 412, 5, 45, 0, 0, 412, 414, 3, 190, 95, 0, 413, 411, 1, 0, 0, + 0, 413, 414, 1, 0, 0, 0, 414, 416, 1, 0, 0, 0, 415, 417, 3, 142, 71, 0, + 416, 415, 1, 0, 0, 0, 416, 417, 1, 0, 0, 0, 417, 25, 1, 0, 0, 0, 418, 420, + 5, 50, 0, 0, 419, 421, 7, 2, 0, 0, 420, 419, 1, 0, 0, 0, 420, 421, 1, 0, + 0, 0, 421, 422, 1, 0, 0, 0, 422, 426, 5, 132, 0, 0, 423, 424, 5, 80, 0, + 0, 424, 425, 5, 102, 0, 0, 425, 427, 5, 70, 0, 0, 426, 423, 1, 0, 0, 0, + 426, 427, 1, 0, 0, 0, 427, 431, 1, 0, 0, 0, 428, 429, 3, 182, 91, 0, 429, + 430, 5, 2, 0, 0, 430, 432, 1, 0, 0, 0, 431, 428, 1, 0, 0, 0, 431, 432, + 1, 0, 0, 0, 432, 433, 1, 0, 0, 0, 433, 457, 3, 184, 92, 0, 434, 435, 5, + 3, 0, 0, 435, 440, 3, 28, 14, 0, 436, 437, 5, 5, 0, 0, 437, 439, 3, 28, + 14, 0, 438, 436, 1, 0, 0, 
0, 439, 442, 1, 0, 0, 0, 440, 441, 1, 0, 0, 0, + 440, 438, 1, 0, 0, 0, 441, 447, 1, 0, 0, 0, 442, 440, 1, 0, 0, 0, 443, + 444, 5, 5, 0, 0, 444, 446, 3, 36, 18, 0, 445, 443, 1, 0, 0, 0, 446, 449, + 1, 0, 0, 0, 447, 445, 1, 0, 0, 0, 447, 448, 1, 0, 0, 0, 448, 450, 1, 0, + 0, 0, 449, 447, 1, 0, 0, 0, 450, 453, 5, 4, 0, 0, 451, 452, 5, 150, 0, + 0, 452, 454, 5, 185, 0, 0, 453, 451, 1, 0, 0, 0, 453, 454, 1, 0, 0, 0, + 454, 458, 1, 0, 0, 0, 455, 456, 5, 33, 0, 0, 456, 458, 3, 86, 43, 0, 457, + 434, 1, 0, 0, 0, 457, 455, 1, 0, 0, 0, 458, 27, 1, 0, 0, 0, 459, 461, 3, + 188, 94, 0, 460, 462, 3, 30, 15, 0, 461, 460, 1, 0, 0, 0, 461, 462, 1, + 0, 0, 0, 462, 466, 1, 0, 0, 0, 463, 465, 3, 32, 16, 0, 464, 463, 1, 0, + 0, 0, 465, 468, 1, 0, 0, 0, 466, 464, 1, 0, 0, 0, 466, 467, 1, 0, 0, 0, + 467, 29, 1, 0, 0, 0, 468, 466, 1, 0, 0, 0, 469, 471, 3, 178, 89, 0, 470, + 469, 1, 0, 0, 0, 471, 472, 1, 0, 0, 0, 472, 473, 1, 0, 0, 0, 472, 470, + 1, 0, 0, 0, 473, 484, 1, 0, 0, 0, 474, 475, 5, 3, 0, 0, 475, 476, 3, 34, + 17, 0, 476, 477, 5, 4, 0, 0, 477, 485, 1, 0, 0, 0, 478, 479, 5, 3, 0, 0, + 479, 480, 3, 34, 17, 0, 480, 481, 5, 5, 0, 0, 481, 482, 3, 34, 17, 0, 482, + 483, 5, 4, 0, 0, 483, 485, 1, 0, 0, 0, 484, 474, 1, 0, 0, 0, 484, 478, + 1, 0, 0, 0, 484, 485, 1, 0, 0, 0, 485, 31, 1, 0, 0, 0, 486, 487, 5, 49, + 0, 0, 487, 489, 3, 178, 89, 0, 488, 486, 1, 0, 0, 0, 488, 489, 1, 0, 0, + 0, 489, 539, 1, 0, 0, 0, 490, 491, 5, 113, 0, 0, 491, 493, 5, 95, 0, 0, + 492, 494, 3, 142, 71, 0, 493, 492, 1, 0, 0, 0, 493, 494, 1, 0, 0, 0, 494, + 496, 1, 0, 0, 0, 495, 497, 3, 40, 20, 0, 496, 495, 1, 0, 0, 0, 496, 497, + 1, 0, 0, 0, 497, 499, 1, 0, 0, 0, 498, 500, 5, 36, 0, 0, 499, 498, 1, 0, + 0, 0, 499, 500, 1, 0, 0, 0, 500, 540, 1, 0, 0, 0, 501, 503, 5, 102, 0, + 0, 502, 501, 1, 0, 0, 0, 502, 503, 1, 0, 0, 0, 503, 504, 1, 0, 0, 0, 504, + 507, 5, 104, 0, 0, 505, 507, 5, 140, 0, 0, 506, 502, 1, 0, 0, 0, 506, 505, + 1, 0, 0, 0, 507, 509, 1, 0, 0, 0, 508, 510, 3, 40, 20, 0, 509, 508, 1, + 0, 0, 0, 509, 510, 1, 0, 0, 0, 510, 540, 1, 0, 0, 0, 511, 512, 5, 44, 0, + 0, 512, 513, 5, 3, 0, 0, 513, 514, 3, 64, 32, 0, 514, 515, 5, 4, 0, 0, + 515, 540, 1, 0, 0, 0, 516, 523, 5, 56, 0, 0, 517, 524, 3, 34, 17, 0, 518, + 524, 3, 68, 34, 0, 519, 520, 5, 3, 0, 0, 520, 521, 3, 64, 32, 0, 521, 522, + 5, 4, 0, 0, 522, 524, 1, 0, 0, 0, 523, 517, 1, 0, 0, 0, 523, 518, 1, 0, + 0, 0, 523, 519, 1, 0, 0, 0, 524, 540, 1, 0, 0, 0, 525, 526, 5, 45, 0, 0, + 526, 540, 3, 190, 95, 0, 527, 540, 3, 38, 19, 0, 528, 529, 5, 169, 0, 0, + 529, 531, 5, 170, 0, 0, 530, 528, 1, 0, 0, 0, 530, 531, 1, 0, 0, 0, 531, + 532, 1, 0, 0, 0, 532, 533, 5, 33, 0, 0, 533, 534, 5, 3, 0, 0, 534, 535, + 3, 64, 32, 0, 535, 537, 5, 4, 0, 0, 536, 538, 7, 3, 0, 0, 537, 536, 1, + 0, 0, 0, 537, 538, 1, 0, 0, 0, 538, 540, 1, 0, 0, 0, 539, 490, 1, 0, 0, + 0, 539, 506, 1, 0, 0, 0, 539, 511, 1, 0, 0, 0, 539, 516, 1, 0, 0, 0, 539, + 525, 1, 0, 0, 0, 539, 527, 1, 0, 0, 0, 539, 530, 1, 0, 0, 0, 540, 33, 1, + 0, 0, 0, 541, 543, 7, 4, 0, 0, 542, 541, 1, 0, 0, 0, 542, 543, 1, 0, 0, + 0, 543, 544, 1, 0, 0, 0, 544, 545, 5, 186, 0, 0, 545, 35, 1, 0, 0, 0, 546, + 547, 5, 49, 0, 0, 547, 549, 3, 178, 89, 0, 548, 546, 1, 0, 0, 0, 548, 549, + 1, 0, 0, 0, 549, 587, 1, 0, 0, 0, 550, 551, 5, 113, 0, 0, 551, 554, 5, + 95, 0, 0, 552, 554, 5, 140, 0, 0, 553, 550, 1, 0, 0, 0, 553, 552, 1, 0, + 0, 0, 554, 555, 1, 0, 0, 0, 555, 556, 5, 3, 0, 0, 556, 561, 3, 24, 12, + 0, 557, 558, 5, 5, 0, 0, 558, 560, 3, 24, 12, 0, 559, 557, 1, 0, 0, 0, + 560, 563, 1, 0, 0, 0, 561, 559, 1, 0, 0, 0, 561, 562, 1, 0, 0, 0, 562, + 564, 
1, 0, 0, 0, 563, 561, 1, 0, 0, 0, 564, 566, 5, 4, 0, 0, 565, 567, + 3, 40, 20, 0, 566, 565, 1, 0, 0, 0, 566, 567, 1, 0, 0, 0, 567, 588, 1, + 0, 0, 0, 568, 569, 5, 44, 0, 0, 569, 570, 5, 3, 0, 0, 570, 571, 3, 64, + 32, 0, 571, 572, 5, 4, 0, 0, 572, 588, 1, 0, 0, 0, 573, 574, 5, 74, 0, + 0, 574, 575, 5, 95, 0, 0, 575, 576, 5, 3, 0, 0, 576, 581, 3, 188, 94, 0, + 577, 578, 5, 5, 0, 0, 578, 580, 3, 188, 94, 0, 579, 577, 1, 0, 0, 0, 580, + 583, 1, 0, 0, 0, 581, 579, 1, 0, 0, 0, 581, 582, 1, 0, 0, 0, 582, 584, + 1, 0, 0, 0, 583, 581, 1, 0, 0, 0, 584, 585, 5, 4, 0, 0, 585, 586, 3, 38, + 19, 0, 586, 588, 1, 0, 0, 0, 587, 553, 1, 0, 0, 0, 587, 568, 1, 0, 0, 0, + 587, 573, 1, 0, 0, 0, 588, 37, 1, 0, 0, 0, 589, 590, 5, 117, 0, 0, 590, + 602, 3, 192, 96, 0, 591, 592, 5, 3, 0, 0, 592, 597, 3, 188, 94, 0, 593, + 594, 5, 5, 0, 0, 594, 596, 3, 188, 94, 0, 595, 593, 1, 0, 0, 0, 596, 599, + 1, 0, 0, 0, 597, 595, 1, 0, 0, 0, 597, 598, 1, 0, 0, 0, 598, 600, 1, 0, + 0, 0, 599, 597, 1, 0, 0, 0, 600, 601, 5, 4, 0, 0, 601, 603, 1, 0, 0, 0, + 602, 591, 1, 0, 0, 0, 602, 603, 1, 0, 0, 0, 603, 618, 1, 0, 0, 0, 604, + 605, 5, 107, 0, 0, 605, 612, 7, 5, 0, 0, 606, 607, 5, 131, 0, 0, 607, 613, + 7, 6, 0, 0, 608, 613, 5, 41, 0, 0, 609, 613, 5, 123, 0, 0, 610, 611, 5, + 101, 0, 0, 611, 613, 5, 26, 0, 0, 612, 606, 1, 0, 0, 0, 612, 608, 1, 0, + 0, 0, 612, 609, 1, 0, 0, 0, 612, 610, 1, 0, 0, 0, 613, 617, 1, 0, 0, 0, + 614, 615, 5, 99, 0, 0, 615, 617, 3, 178, 89, 0, 616, 604, 1, 0, 0, 0, 616, + 614, 1, 0, 0, 0, 617, 620, 1, 0, 0, 0, 618, 616, 1, 0, 0, 0, 618, 619, + 1, 0, 0, 0, 619, 629, 1, 0, 0, 0, 620, 618, 1, 0, 0, 0, 621, 623, 5, 102, + 0, 0, 622, 621, 1, 0, 0, 0, 622, 623, 1, 0, 0, 0, 623, 624, 1, 0, 0, 0, + 624, 627, 5, 57, 0, 0, 625, 626, 5, 86, 0, 0, 626, 628, 7, 7, 0, 0, 627, + 625, 1, 0, 0, 0, 627, 628, 1, 0, 0, 0, 628, 630, 1, 0, 0, 0, 629, 622, + 1, 0, 0, 0, 629, 630, 1, 0, 0, 0, 630, 39, 1, 0, 0, 0, 631, 632, 5, 107, + 0, 0, 632, 633, 5, 48, 0, 0, 633, 634, 7, 8, 0, 0, 634, 41, 1, 0, 0, 0, + 635, 637, 5, 50, 0, 0, 636, 638, 7, 2, 0, 0, 637, 636, 1, 0, 0, 0, 637, + 638, 1, 0, 0, 0, 638, 639, 1, 0, 0, 0, 639, 643, 5, 138, 0, 0, 640, 641, + 5, 80, 0, 0, 641, 642, 5, 102, 0, 0, 642, 644, 5, 70, 0, 0, 643, 640, 1, + 0, 0, 0, 643, 644, 1, 0, 0, 0, 644, 648, 1, 0, 0, 0, 645, 646, 3, 182, + 91, 0, 646, 647, 5, 2, 0, 0, 647, 649, 1, 0, 0, 0, 648, 645, 1, 0, 0, 0, + 648, 649, 1, 0, 0, 0, 649, 650, 1, 0, 0, 0, 650, 655, 3, 196, 98, 0, 651, + 656, 5, 37, 0, 0, 652, 656, 5, 28, 0, 0, 653, 654, 5, 89, 0, 0, 654, 656, + 5, 105, 0, 0, 655, 651, 1, 0, 0, 0, 655, 652, 1, 0, 0, 0, 655, 653, 1, + 0, 0, 0, 655, 656, 1, 0, 0, 0, 656, 671, 1, 0, 0, 0, 657, 672, 5, 59, 0, + 0, 658, 672, 5, 88, 0, 0, 659, 669, 5, 141, 0, 0, 660, 661, 5, 105, 0, + 0, 661, 666, 3, 188, 94, 0, 662, 663, 5, 5, 0, 0, 663, 665, 3, 188, 94, + 0, 664, 662, 1, 0, 0, 0, 665, 668, 1, 0, 0, 0, 666, 664, 1, 0, 0, 0, 666, + 667, 1, 0, 0, 0, 667, 670, 1, 0, 0, 0, 668, 666, 1, 0, 0, 0, 669, 660, + 1, 0, 0, 0, 669, 670, 1, 0, 0, 0, 670, 672, 1, 0, 0, 0, 671, 657, 1, 0, + 0, 0, 671, 658, 1, 0, 0, 0, 671, 659, 1, 0, 0, 0, 672, 673, 1, 0, 0, 0, + 673, 674, 5, 107, 0, 0, 674, 678, 3, 184, 92, 0, 675, 676, 5, 73, 0, 0, + 676, 677, 5, 64, 0, 0, 677, 679, 5, 127, 0, 0, 678, 675, 1, 0, 0, 0, 678, + 679, 1, 0, 0, 0, 679, 682, 1, 0, 0, 0, 680, 681, 5, 147, 0, 0, 681, 683, + 3, 64, 32, 0, 682, 680, 1, 0, 0, 0, 682, 683, 1, 0, 0, 0, 683, 684, 1, + 0, 0, 0, 684, 693, 5, 38, 0, 0, 685, 690, 3, 108, 54, 0, 686, 690, 3, 74, + 37, 0, 687, 690, 3, 56, 28, 0, 688, 690, 3, 86, 43, 0, 689, 
685, 1, 0, + 0, 0, 689, 686, 1, 0, 0, 0, 689, 687, 1, 0, 0, 0, 689, 688, 1, 0, 0, 0, + 690, 691, 1, 0, 0, 0, 691, 692, 5, 1, 0, 0, 692, 694, 1, 0, 0, 0, 693, + 689, 1, 0, 0, 0, 694, 695, 1, 0, 0, 0, 695, 693, 1, 0, 0, 0, 695, 696, + 1, 0, 0, 0, 696, 697, 1, 0, 0, 0, 697, 698, 5, 66, 0, 0, 698, 43, 1, 0, + 0, 0, 699, 701, 5, 50, 0, 0, 700, 702, 7, 2, 0, 0, 701, 700, 1, 0, 0, 0, + 701, 702, 1, 0, 0, 0, 702, 703, 1, 0, 0, 0, 703, 707, 5, 145, 0, 0, 704, + 705, 5, 80, 0, 0, 705, 706, 5, 102, 0, 0, 706, 708, 5, 70, 0, 0, 707, 704, + 1, 0, 0, 0, 707, 708, 1, 0, 0, 0, 708, 712, 1, 0, 0, 0, 709, 710, 3, 182, + 91, 0, 710, 711, 5, 2, 0, 0, 711, 713, 1, 0, 0, 0, 712, 709, 1, 0, 0, 0, + 712, 713, 1, 0, 0, 0, 713, 714, 1, 0, 0, 0, 714, 726, 3, 198, 99, 0, 715, + 716, 5, 3, 0, 0, 716, 721, 3, 188, 94, 0, 717, 718, 5, 5, 0, 0, 718, 720, + 3, 188, 94, 0, 719, 717, 1, 0, 0, 0, 720, 723, 1, 0, 0, 0, 721, 719, 1, + 0, 0, 0, 721, 722, 1, 0, 0, 0, 722, 724, 1, 0, 0, 0, 723, 721, 1, 0, 0, + 0, 724, 725, 5, 4, 0, 0, 725, 727, 1, 0, 0, 0, 726, 715, 1, 0, 0, 0, 726, + 727, 1, 0, 0, 0, 727, 728, 1, 0, 0, 0, 728, 729, 5, 33, 0, 0, 729, 730, + 3, 86, 43, 0, 730, 45, 1, 0, 0, 0, 731, 732, 5, 50, 0, 0, 732, 733, 5, + 146, 0, 0, 733, 737, 5, 132, 0, 0, 734, 735, 5, 80, 0, 0, 735, 736, 5, + 102, 0, 0, 736, 738, 5, 70, 0, 0, 737, 734, 1, 0, 0, 0, 737, 738, 1, 0, + 0, 0, 738, 742, 1, 0, 0, 0, 739, 740, 3, 182, 91, 0, 740, 741, 5, 2, 0, + 0, 741, 743, 1, 0, 0, 0, 742, 739, 1, 0, 0, 0, 742, 743, 1, 0, 0, 0, 743, + 744, 1, 0, 0, 0, 744, 745, 3, 184, 92, 0, 745, 746, 5, 142, 0, 0, 746, + 758, 3, 200, 100, 0, 747, 748, 5, 3, 0, 0, 748, 753, 3, 172, 86, 0, 749, + 750, 5, 5, 0, 0, 750, 752, 3, 172, 86, 0, 751, 749, 1, 0, 0, 0, 752, 755, + 1, 0, 0, 0, 753, 751, 1, 0, 0, 0, 753, 754, 1, 0, 0, 0, 754, 756, 1, 0, + 0, 0, 755, 753, 1, 0, 0, 0, 756, 757, 5, 4, 0, 0, 757, 759, 1, 0, 0, 0, + 758, 747, 1, 0, 0, 0, 758, 759, 1, 0, 0, 0, 759, 47, 1, 0, 0, 0, 760, 762, + 5, 149, 0, 0, 761, 763, 5, 116, 0, 0, 762, 761, 1, 0, 0, 0, 762, 763, 1, + 0, 0, 0, 763, 764, 1, 0, 0, 0, 764, 765, 3, 50, 25, 0, 765, 766, 5, 33, + 0, 0, 766, 767, 5, 3, 0, 0, 767, 768, 3, 86, 43, 0, 768, 778, 5, 4, 0, + 0, 769, 770, 5, 5, 0, 0, 770, 771, 3, 50, 25, 0, 771, 772, 5, 33, 0, 0, + 772, 773, 5, 3, 0, 0, 773, 774, 3, 86, 43, 0, 774, 775, 5, 4, 0, 0, 775, + 777, 1, 0, 0, 0, 776, 769, 1, 0, 0, 0, 777, 780, 1, 0, 0, 0, 778, 776, + 1, 0, 0, 0, 778, 779, 1, 0, 0, 0, 779, 49, 1, 0, 0, 0, 780, 778, 1, 0, + 0, 0, 781, 793, 3, 184, 92, 0, 782, 783, 5, 3, 0, 0, 783, 788, 3, 188, + 94, 0, 784, 785, 5, 5, 0, 0, 785, 787, 3, 188, 94, 0, 786, 784, 1, 0, 0, + 0, 787, 790, 1, 0, 0, 0, 788, 786, 1, 0, 0, 0, 788, 789, 1, 0, 0, 0, 789, + 791, 1, 0, 0, 0, 790, 788, 1, 0, 0, 0, 791, 792, 5, 4, 0, 0, 792, 794, + 1, 0, 0, 0, 793, 782, 1, 0, 0, 0, 793, 794, 1, 0, 0, 0, 794, 51, 1, 0, + 0, 0, 795, 796, 3, 50, 25, 0, 796, 797, 5, 33, 0, 0, 797, 798, 5, 3, 0, + 0, 798, 799, 3, 164, 82, 0, 799, 801, 5, 139, 0, 0, 800, 802, 5, 29, 0, + 0, 801, 800, 1, 0, 0, 0, 801, 802, 1, 0, 0, 0, 802, 803, 1, 0, 0, 0, 803, + 804, 3, 166, 83, 0, 804, 805, 5, 4, 0, 0, 805, 53, 1, 0, 0, 0, 806, 818, + 3, 184, 92, 0, 807, 808, 5, 3, 0, 0, 808, 813, 3, 188, 94, 0, 809, 810, + 5, 5, 0, 0, 810, 812, 3, 188, 94, 0, 811, 809, 1, 0, 0, 0, 812, 815, 1, + 0, 0, 0, 813, 811, 1, 0, 0, 0, 813, 814, 1, 0, 0, 0, 814, 816, 1, 0, 0, + 0, 815, 813, 1, 0, 0, 0, 816, 817, 5, 4, 0, 0, 817, 819, 1, 0, 0, 0, 818, + 807, 1, 0, 0, 0, 818, 819, 1, 0, 0, 0, 819, 820, 1, 0, 0, 0, 820, 821, + 5, 33, 0, 0, 821, 822, 5, 3, 0, 0, 822, 
823, 3, 86, 43, 0, 823, 824, 5, + 4, 0, 0, 824, 55, 1, 0, 0, 0, 825, 827, 3, 48, 24, 0, 826, 825, 1, 0, 0, + 0, 826, 827, 1, 0, 0, 0, 827, 828, 1, 0, 0, 0, 828, 829, 5, 59, 0, 0, 829, + 830, 5, 75, 0, 0, 830, 833, 3, 114, 57, 0, 831, 832, 5, 148, 0, 0, 832, + 834, 3, 64, 32, 0, 833, 831, 1, 0, 0, 0, 833, 834, 1, 0, 0, 0, 834, 836, + 1, 0, 0, 0, 835, 837, 3, 76, 38, 0, 836, 835, 1, 0, 0, 0, 836, 837, 1, + 0, 0, 0, 837, 57, 1, 0, 0, 0, 838, 840, 3, 48, 24, 0, 839, 838, 1, 0, 0, + 0, 839, 840, 1, 0, 0, 0, 840, 841, 1, 0, 0, 0, 841, 842, 5, 59, 0, 0, 842, + 843, 5, 75, 0, 0, 843, 846, 3, 114, 57, 0, 844, 845, 5, 148, 0, 0, 845, + 847, 3, 64, 32, 0, 846, 844, 1, 0, 0, 0, 846, 847, 1, 0, 0, 0, 847, 849, + 1, 0, 0, 0, 848, 850, 3, 76, 38, 0, 849, 848, 1, 0, 0, 0, 849, 850, 1, + 0, 0, 0, 850, 855, 1, 0, 0, 0, 851, 853, 3, 136, 68, 0, 852, 851, 1, 0, + 0, 0, 852, 853, 1, 0, 0, 0, 853, 854, 1, 0, 0, 0, 854, 856, 3, 138, 69, + 0, 855, 852, 1, 0, 0, 0, 855, 856, 1, 0, 0, 0, 856, 59, 1, 0, 0, 0, 857, + 859, 5, 61, 0, 0, 858, 860, 5, 55, 0, 0, 859, 858, 1, 0, 0, 0, 859, 860, + 1, 0, 0, 0, 860, 861, 1, 0, 0, 0, 861, 862, 3, 182, 91, 0, 862, 61, 1, + 0, 0, 0, 863, 864, 5, 63, 0, 0, 864, 867, 7, 9, 0, 0, 865, 866, 5, 80, + 0, 0, 866, 868, 5, 70, 0, 0, 867, 865, 1, 0, 0, 0, 867, 868, 1, 0, 0, 0, + 868, 872, 1, 0, 0, 0, 869, 870, 3, 182, 91, 0, 870, 871, 5, 2, 0, 0, 871, + 873, 1, 0, 0, 0, 872, 869, 1, 0, 0, 0, 872, 873, 1, 0, 0, 0, 873, 874, + 1, 0, 0, 0, 874, 875, 3, 224, 112, 0, 875, 63, 1, 0, 0, 0, 876, 877, 6, + 32, -1, 0, 877, 965, 3, 68, 34, 0, 878, 965, 5, 187, 0, 0, 879, 880, 3, + 182, 91, 0, 880, 881, 5, 2, 0, 0, 881, 883, 1, 0, 0, 0, 882, 879, 1, 0, + 0, 0, 882, 883, 1, 0, 0, 0, 883, 884, 1, 0, 0, 0, 884, 885, 3, 184, 92, + 0, 885, 886, 5, 2, 0, 0, 886, 888, 1, 0, 0, 0, 887, 882, 1, 0, 0, 0, 887, + 888, 1, 0, 0, 0, 888, 889, 1, 0, 0, 0, 889, 965, 3, 188, 94, 0, 890, 891, + 3, 168, 84, 0, 891, 892, 3, 64, 32, 21, 892, 965, 1, 0, 0, 0, 893, 894, + 3, 180, 90, 0, 894, 907, 5, 3, 0, 0, 895, 897, 5, 62, 0, 0, 896, 895, 1, + 0, 0, 0, 896, 897, 1, 0, 0, 0, 897, 898, 1, 0, 0, 0, 898, 903, 3, 64, 32, + 0, 899, 900, 5, 5, 0, 0, 900, 902, 3, 64, 32, 0, 901, 899, 1, 0, 0, 0, + 902, 905, 1, 0, 0, 0, 903, 901, 1, 0, 0, 0, 903, 904, 1, 0, 0, 0, 904, + 908, 1, 0, 0, 0, 905, 903, 1, 0, 0, 0, 906, 908, 5, 7, 0, 0, 907, 896, + 1, 0, 0, 0, 907, 906, 1, 0, 0, 0, 907, 908, 1, 0, 0, 0, 908, 909, 1, 0, + 0, 0, 909, 911, 5, 4, 0, 0, 910, 912, 3, 118, 59, 0, 911, 910, 1, 0, 0, + 0, 911, 912, 1, 0, 0, 0, 912, 914, 1, 0, 0, 0, 913, 915, 3, 122, 61, 0, + 914, 913, 1, 0, 0, 0, 914, 915, 1, 0, 0, 0, 915, 965, 1, 0, 0, 0, 916, + 917, 5, 3, 0, 0, 917, 922, 3, 64, 32, 0, 918, 919, 5, 5, 0, 0, 919, 921, + 3, 64, 32, 0, 920, 918, 1, 0, 0, 0, 921, 924, 1, 0, 0, 0, 922, 920, 1, + 0, 0, 0, 922, 923, 1, 0, 0, 0, 923, 925, 1, 0, 0, 0, 924, 922, 1, 0, 0, + 0, 925, 926, 5, 4, 0, 0, 926, 965, 1, 0, 0, 0, 927, 928, 5, 43, 0, 0, 928, + 929, 5, 3, 0, 0, 929, 930, 3, 64, 32, 0, 930, 931, 5, 33, 0, 0, 931, 932, + 3, 30, 15, 0, 932, 933, 5, 4, 0, 0, 933, 965, 1, 0, 0, 0, 934, 936, 5, + 102, 0, 0, 935, 934, 1, 0, 0, 0, 935, 936, 1, 0, 0, 0, 936, 937, 1, 0, + 0, 0, 937, 939, 5, 70, 0, 0, 938, 935, 1, 0, 0, 0, 938, 939, 1, 0, 0, 0, + 939, 940, 1, 0, 0, 0, 940, 941, 5, 3, 0, 0, 941, 942, 3, 86, 43, 0, 942, + 943, 5, 4, 0, 0, 943, 965, 1, 0, 0, 0, 944, 946, 5, 42, 0, 0, 945, 947, + 3, 64, 32, 0, 946, 945, 1, 0, 0, 0, 946, 947, 1, 0, 0, 0, 947, 953, 1, + 0, 0, 0, 948, 949, 5, 147, 0, 0, 949, 950, 3, 64, 32, 0, 950, 951, 5, 135, + 0, 0, 951, 952, 3, 64, 
32, 0, 952, 954, 1, 0, 0, 0, 953, 948, 1, 0, 0, + 0, 954, 955, 1, 0, 0, 0, 955, 953, 1, 0, 0, 0, 955, 956, 1, 0, 0, 0, 956, + 959, 1, 0, 0, 0, 957, 958, 5, 65, 0, 0, 958, 960, 3, 64, 32, 0, 959, 957, + 1, 0, 0, 0, 959, 960, 1, 0, 0, 0, 960, 961, 1, 0, 0, 0, 961, 962, 5, 66, + 0, 0, 962, 965, 1, 0, 0, 0, 963, 965, 3, 66, 33, 0, 964, 876, 1, 0, 0, + 0, 964, 878, 1, 0, 0, 0, 964, 887, 1, 0, 0, 0, 964, 890, 1, 0, 0, 0, 964, + 893, 1, 0, 0, 0, 964, 916, 1, 0, 0, 0, 964, 927, 1, 0, 0, 0, 964, 938, + 1, 0, 0, 0, 964, 944, 1, 0, 0, 0, 964, 963, 1, 0, 0, 0, 965, 1085, 1, 0, + 0, 0, 966, 967, 10, 20, 0, 0, 967, 968, 5, 11, 0, 0, 968, 1084, 3, 64, + 32, 21, 969, 970, 10, 19, 0, 0, 970, 971, 7, 10, 0, 0, 971, 1084, 3, 64, + 32, 20, 972, 973, 10, 18, 0, 0, 973, 974, 7, 4, 0, 0, 974, 1084, 3, 64, + 32, 19, 975, 976, 10, 17, 0, 0, 976, 977, 7, 11, 0, 0, 977, 1084, 3, 64, + 32, 18, 978, 979, 10, 16, 0, 0, 979, 980, 7, 12, 0, 0, 980, 1084, 3, 64, + 32, 17, 981, 994, 10, 15, 0, 0, 982, 995, 5, 6, 0, 0, 983, 995, 5, 22, + 0, 0, 984, 995, 5, 23, 0, 0, 985, 995, 5, 24, 0, 0, 986, 995, 5, 92, 0, + 0, 987, 988, 5, 92, 0, 0, 988, 995, 5, 102, 0, 0, 989, 995, 5, 83, 0, 0, + 990, 995, 5, 97, 0, 0, 991, 995, 5, 77, 0, 0, 992, 995, 5, 99, 0, 0, 993, + 995, 5, 118, 0, 0, 994, 982, 1, 0, 0, 0, 994, 983, 1, 0, 0, 0, 994, 984, + 1, 0, 0, 0, 994, 985, 1, 0, 0, 0, 994, 986, 1, 0, 0, 0, 994, 987, 1, 0, + 0, 0, 994, 989, 1, 0, 0, 0, 994, 990, 1, 0, 0, 0, 994, 991, 1, 0, 0, 0, + 994, 992, 1, 0, 0, 0, 994, 993, 1, 0, 0, 0, 995, 996, 1, 0, 0, 0, 996, + 1084, 3, 64, 32, 16, 997, 998, 10, 14, 0, 0, 998, 999, 5, 32, 0, 0, 999, + 1084, 3, 64, 32, 15, 1000, 1001, 10, 13, 0, 0, 1001, 1002, 5, 108, 0, 0, + 1002, 1084, 3, 64, 32, 14, 1003, 1004, 10, 6, 0, 0, 1004, 1006, 5, 92, + 0, 0, 1005, 1007, 5, 102, 0, 0, 1006, 1005, 1, 0, 0, 0, 1006, 1007, 1, + 0, 0, 0, 1007, 1008, 1, 0, 0, 0, 1008, 1084, 3, 64, 32, 7, 1009, 1011, + 10, 5, 0, 0, 1010, 1012, 5, 102, 0, 0, 1011, 1010, 1, 0, 0, 0, 1011, 1012, + 1, 0, 0, 0, 1012, 1013, 1, 0, 0, 0, 1013, 1014, 5, 39, 0, 0, 1014, 1015, + 3, 64, 32, 0, 1015, 1016, 5, 32, 0, 0, 1016, 1017, 3, 64, 32, 6, 1017, + 1084, 1, 0, 0, 0, 1018, 1019, 10, 9, 0, 0, 1019, 1020, 5, 45, 0, 0, 1020, + 1084, 3, 190, 95, 0, 1021, 1023, 10, 8, 0, 0, 1022, 1024, 5, 102, 0, 0, + 1023, 1022, 1, 0, 0, 0, 1023, 1024, 1, 0, 0, 0, 1024, 1025, 1, 0, 0, 0, + 1025, 1026, 7, 13, 0, 0, 1026, 1029, 3, 64, 32, 0, 1027, 1028, 5, 67, 0, + 0, 1028, 1030, 3, 64, 32, 0, 1029, 1027, 1, 0, 0, 0, 1029, 1030, 1, 0, + 0, 0, 1030, 1084, 1, 0, 0, 0, 1031, 1036, 10, 7, 0, 0, 1032, 1037, 5, 93, + 0, 0, 1033, 1037, 5, 103, 0, 0, 1034, 1035, 5, 102, 0, 0, 1035, 1037, 5, + 104, 0, 0, 1036, 1032, 1, 0, 0, 0, 1036, 1033, 1, 0, 0, 0, 1036, 1034, + 1, 0, 0, 0, 1037, 1084, 1, 0, 0, 0, 1038, 1040, 10, 4, 0, 0, 1039, 1041, + 5, 102, 0, 0, 1040, 1039, 1, 0, 0, 0, 1040, 1041, 1, 0, 0, 0, 1041, 1042, + 1, 0, 0, 0, 1042, 1081, 5, 83, 0, 0, 1043, 1053, 5, 3, 0, 0, 1044, 1054, + 3, 86, 43, 0, 1045, 1050, 3, 64, 32, 0, 1046, 1047, 5, 5, 0, 0, 1047, 1049, + 3, 64, 32, 0, 1048, 1046, 1, 0, 0, 0, 1049, 1052, 1, 0, 0, 0, 1050, 1048, + 1, 0, 0, 0, 1050, 1051, 1, 0, 0, 0, 1051, 1054, 1, 0, 0, 0, 1052, 1050, + 1, 0, 0, 0, 1053, 1044, 1, 0, 0, 0, 1053, 1045, 1, 0, 0, 0, 1053, 1054, + 1, 0, 0, 0, 1054, 1055, 1, 0, 0, 0, 1055, 1082, 5, 4, 0, 0, 1056, 1057, + 3, 182, 91, 0, 1057, 1058, 5, 2, 0, 0, 1058, 1060, 1, 0, 0, 0, 1059, 1056, + 1, 0, 0, 0, 1059, 1060, 1, 0, 0, 0, 1060, 1061, 1, 0, 0, 0, 1061, 1082, + 3, 184, 92, 0, 1062, 1063, 3, 182, 91, 0, 1063, 1064, 5, 2, 0, 0, 1064, 
+ 1066, 1, 0, 0, 0, 1065, 1062, 1, 0, 0, 0, 1065, 1066, 1, 0, 0, 0, 1066, + 1067, 1, 0, 0, 0, 1067, 1068, 3, 222, 111, 0, 1068, 1077, 5, 3, 0, 0, 1069, + 1074, 3, 64, 32, 0, 1070, 1071, 5, 5, 0, 0, 1071, 1073, 3, 64, 32, 0, 1072, + 1070, 1, 0, 0, 0, 1073, 1076, 1, 0, 0, 0, 1074, 1072, 1, 0, 0, 0, 1074, + 1075, 1, 0, 0, 0, 1075, 1078, 1, 0, 0, 0, 1076, 1074, 1, 0, 0, 0, 1077, + 1069, 1, 0, 0, 0, 1077, 1078, 1, 0, 0, 0, 1078, 1079, 1, 0, 0, 0, 1079, + 1080, 5, 4, 0, 0, 1080, 1082, 1, 0, 0, 0, 1081, 1043, 1, 0, 0, 0, 1081, + 1059, 1, 0, 0, 0, 1081, 1065, 1, 0, 0, 0, 1082, 1084, 1, 0, 0, 0, 1083, + 966, 1, 0, 0, 0, 1083, 969, 1, 0, 0, 0, 1083, 972, 1, 0, 0, 0, 1083, 975, + 1, 0, 0, 0, 1083, 978, 1, 0, 0, 0, 1083, 981, 1, 0, 0, 0, 1083, 997, 1, + 0, 0, 0, 1083, 1000, 1, 0, 0, 0, 1083, 1003, 1, 0, 0, 0, 1083, 1009, 1, + 0, 0, 0, 1083, 1018, 1, 0, 0, 0, 1083, 1021, 1, 0, 0, 0, 1083, 1031, 1, + 0, 0, 0, 1083, 1038, 1, 0, 0, 0, 1084, 1087, 1, 0, 0, 0, 1085, 1083, 1, + 0, 0, 0, 1085, 1086, 1, 0, 0, 0, 1086, 65, 1, 0, 0, 0, 1087, 1085, 1, 0, + 0, 0, 1088, 1089, 5, 115, 0, 0, 1089, 1094, 5, 3, 0, 0, 1090, 1095, 5, + 81, 0, 0, 1091, 1092, 7, 14, 0, 0, 1092, 1093, 5, 5, 0, 0, 1093, 1095, + 3, 170, 85, 0, 1094, 1090, 1, 0, 0, 0, 1094, 1091, 1, 0, 0, 0, 1095, 1096, + 1, 0, 0, 0, 1096, 1097, 5, 4, 0, 0, 1097, 67, 1, 0, 0, 0, 1098, 1099, 7, + 15, 0, 0, 1099, 69, 1, 0, 0, 0, 1100, 1101, 5, 3, 0, 0, 1101, 1106, 3, + 64, 32, 0, 1102, 1103, 5, 5, 0, 0, 1103, 1105, 3, 64, 32, 0, 1104, 1102, + 1, 0, 0, 0, 1105, 1108, 1, 0, 0, 0, 1106, 1104, 1, 0, 0, 0, 1106, 1107, + 1, 0, 0, 0, 1107, 1109, 1, 0, 0, 0, 1108, 1106, 1, 0, 0, 0, 1109, 1110, + 5, 4, 0, 0, 1110, 71, 1, 0, 0, 0, 1111, 1112, 5, 144, 0, 0, 1112, 1117, + 3, 70, 35, 0, 1113, 1114, 5, 5, 0, 0, 1114, 1116, 3, 70, 35, 0, 1115, 1113, + 1, 0, 0, 0, 1116, 1119, 1, 0, 0, 0, 1117, 1115, 1, 0, 0, 0, 1117, 1118, + 1, 0, 0, 0, 1118, 73, 1, 0, 0, 0, 1119, 1117, 1, 0, 0, 0, 1120, 1122, 3, + 48, 24, 0, 1121, 1120, 1, 0, 0, 0, 1121, 1122, 1, 0, 0, 0, 1122, 1128, + 1, 0, 0, 0, 1123, 1129, 5, 88, 0, 0, 1124, 1129, 5, 122, 0, 0, 1125, 1126, + 5, 88, 0, 0, 1126, 1127, 5, 108, 0, 0, 1127, 1129, 7, 8, 0, 0, 1128, 1123, + 1, 0, 0, 0, 1128, 1124, 1, 0, 0, 0, 1128, 1125, 1, 0, 0, 0, 1129, 1130, + 1, 0, 0, 0, 1130, 1134, 5, 91, 0, 0, 1131, 1132, 3, 182, 91, 0, 1132, 1133, + 5, 2, 0, 0, 1133, 1135, 1, 0, 0, 0, 1134, 1131, 1, 0, 0, 0, 1134, 1135, + 1, 0, 0, 0, 1135, 1136, 1, 0, 0, 0, 1136, 1139, 3, 184, 92, 0, 1137, 1138, + 5, 33, 0, 0, 1138, 1140, 3, 206, 103, 0, 1139, 1137, 1, 0, 0, 0, 1139, + 1140, 1, 0, 0, 0, 1140, 1152, 1, 0, 0, 0, 1141, 1142, 5, 3, 0, 0, 1142, + 1147, 3, 188, 94, 0, 1143, 1144, 5, 5, 0, 0, 1144, 1146, 3, 188, 94, 0, + 1145, 1143, 1, 0, 0, 0, 1146, 1149, 1, 0, 0, 0, 1147, 1145, 1, 0, 0, 0, + 1147, 1148, 1, 0, 0, 0, 1148, 1150, 1, 0, 0, 0, 1149, 1147, 1, 0, 0, 0, + 1150, 1151, 5, 4, 0, 0, 1151, 1153, 1, 0, 0, 0, 1152, 1141, 1, 0, 0, 0, + 1152, 1153, 1, 0, 0, 0, 1153, 1163, 1, 0, 0, 0, 1154, 1157, 3, 72, 36, + 0, 1155, 1157, 3, 86, 43, 0, 1156, 1154, 1, 0, 0, 0, 1156, 1155, 1, 0, + 0, 0, 1157, 1159, 1, 0, 0, 0, 1158, 1160, 3, 78, 39, 0, 1159, 1158, 1, + 0, 0, 0, 1159, 1160, 1, 0, 0, 0, 1160, 1164, 1, 0, 0, 0, 1161, 1162, 5, + 56, 0, 0, 1162, 1164, 5, 144, 0, 0, 1163, 1156, 1, 0, 0, 0, 1163, 1161, + 1, 0, 0, 0, 1164, 1166, 1, 0, 0, 0, 1165, 1167, 3, 76, 38, 0, 1166, 1165, + 1, 0, 0, 0, 1166, 1167, 1, 0, 0, 0, 1167, 75, 1, 0, 0, 0, 1168, 1169, 5, + 124, 0, 0, 1169, 1174, 3, 100, 50, 0, 1170, 1171, 5, 5, 0, 0, 1171, 1173, + 3, 100, 50, 0, 1172, 1170, 1, 0, 0, 0, 
1173, 1176, 1, 0, 0, 0, 1174, 1172, + 1, 0, 0, 0, 1174, 1175, 1, 0, 0, 0, 1175, 77, 1, 0, 0, 0, 1176, 1174, 1, + 0, 0, 0, 1177, 1178, 5, 107, 0, 0, 1178, 1193, 5, 48, 0, 0, 1179, 1180, + 5, 3, 0, 0, 1180, 1185, 3, 24, 12, 0, 1181, 1182, 5, 5, 0, 0, 1182, 1184, + 3, 24, 12, 0, 1183, 1181, 1, 0, 0, 0, 1184, 1187, 1, 0, 0, 0, 1185, 1183, + 1, 0, 0, 0, 1185, 1186, 1, 0, 0, 0, 1186, 1188, 1, 0, 0, 0, 1187, 1185, + 1, 0, 0, 0, 1188, 1191, 5, 4, 0, 0, 1189, 1190, 5, 148, 0, 0, 1190, 1192, + 3, 64, 32, 0, 1191, 1189, 1, 0, 0, 0, 1191, 1192, 1, 0, 0, 0, 1192, 1194, + 1, 0, 0, 0, 1193, 1179, 1, 0, 0, 0, 1193, 1194, 1, 0, 0, 0, 1194, 1195, + 1, 0, 0, 0, 1195, 1222, 5, 183, 0, 0, 1196, 1223, 5, 184, 0, 0, 1197, 1198, + 5, 141, 0, 0, 1198, 1201, 5, 131, 0, 0, 1199, 1202, 3, 188, 94, 0, 1200, + 1202, 3, 110, 55, 0, 1201, 1199, 1, 0, 0, 0, 1201, 1200, 1, 0, 0, 0, 1202, + 1203, 1, 0, 0, 0, 1203, 1204, 5, 6, 0, 0, 1204, 1215, 3, 64, 32, 0, 1205, + 1208, 5, 5, 0, 0, 1206, 1209, 3, 188, 94, 0, 1207, 1209, 3, 110, 55, 0, + 1208, 1206, 1, 0, 0, 0, 1208, 1207, 1, 0, 0, 0, 1209, 1210, 1, 0, 0, 0, + 1210, 1211, 5, 6, 0, 0, 1211, 1212, 3, 64, 32, 0, 1212, 1214, 1, 0, 0, + 0, 1213, 1205, 1, 0, 0, 0, 1214, 1217, 1, 0, 0, 0, 1215, 1213, 1, 0, 0, + 0, 1215, 1216, 1, 0, 0, 0, 1216, 1220, 1, 0, 0, 0, 1217, 1215, 1, 0, 0, + 0, 1218, 1219, 5, 148, 0, 0, 1219, 1221, 3, 64, 32, 0, 1220, 1218, 1, 0, + 0, 0, 1220, 1221, 1, 0, 0, 0, 1221, 1223, 1, 0, 0, 0, 1222, 1196, 1, 0, + 0, 0, 1222, 1197, 1, 0, 0, 0, 1223, 79, 1, 0, 0, 0, 1224, 1228, 5, 112, + 0, 0, 1225, 1226, 3, 182, 91, 0, 1226, 1227, 5, 2, 0, 0, 1227, 1229, 1, + 0, 0, 0, 1228, 1225, 1, 0, 0, 0, 1228, 1229, 1, 0, 0, 0, 1229, 1230, 1, + 0, 0, 0, 1230, 1237, 3, 202, 101, 0, 1231, 1232, 5, 6, 0, 0, 1232, 1238, + 3, 82, 41, 0, 1233, 1234, 5, 3, 0, 0, 1234, 1235, 3, 82, 41, 0, 1235, 1236, + 5, 4, 0, 0, 1236, 1238, 1, 0, 0, 0, 1237, 1231, 1, 0, 0, 0, 1237, 1233, + 1, 0, 0, 0, 1237, 1238, 1, 0, 0, 0, 1238, 81, 1, 0, 0, 0, 1239, 1243, 3, + 34, 17, 0, 1240, 1243, 3, 178, 89, 0, 1241, 1243, 5, 188, 0, 0, 1242, 1239, + 1, 0, 0, 0, 1242, 1240, 1, 0, 0, 0, 1242, 1241, 1, 0, 0, 0, 1243, 83, 1, + 0, 0, 0, 1244, 1255, 5, 119, 0, 0, 1245, 1256, 3, 190, 95, 0, 1246, 1247, + 3, 182, 91, 0, 1247, 1248, 5, 2, 0, 0, 1248, 1250, 1, 0, 0, 0, 1249, 1246, + 1, 0, 0, 0, 1249, 1250, 1, 0, 0, 0, 1250, 1253, 1, 0, 0, 0, 1251, 1254, + 3, 184, 92, 0, 1252, 1254, 3, 194, 97, 0, 1253, 1251, 1, 0, 0, 0, 1253, + 1252, 1, 0, 0, 0, 1254, 1256, 1, 0, 0, 0, 1255, 1245, 1, 0, 0, 0, 1255, + 1249, 1, 0, 0, 0, 1255, 1256, 1, 0, 0, 0, 1256, 85, 1, 0, 0, 0, 1257, 1259, + 3, 134, 67, 0, 1258, 1257, 1, 0, 0, 0, 1258, 1259, 1, 0, 0, 0, 1259, 1260, + 1, 0, 0, 0, 1260, 1266, 3, 90, 45, 0, 1261, 1262, 3, 106, 53, 0, 1262, + 1263, 3, 90, 45, 0, 1263, 1265, 1, 0, 0, 0, 1264, 1261, 1, 0, 0, 0, 1265, + 1268, 1, 0, 0, 0, 1266, 1264, 1, 0, 0, 0, 1266, 1267, 1, 0, 0, 0, 1267, + 1270, 1, 0, 0, 0, 1268, 1266, 1, 0, 0, 0, 1269, 1271, 3, 136, 68, 0, 1270, + 1269, 1, 0, 0, 0, 1270, 1271, 1, 0, 0, 0, 1271, 1273, 1, 0, 0, 0, 1272, + 1274, 3, 138, 69, 0, 1273, 1272, 1, 0, 0, 0, 1273, 1274, 1, 0, 0, 0, 1274, + 87, 1, 0, 0, 0, 1275, 1283, 3, 98, 49, 0, 1276, 1277, 3, 102, 51, 0, 1277, + 1279, 3, 98, 49, 0, 1278, 1280, 3, 104, 52, 0, 1279, 1278, 1, 0, 0, 0, + 1279, 1280, 1, 0, 0, 0, 1280, 1282, 1, 0, 0, 0, 1281, 1276, 1, 0, 0, 0, + 1282, 1285, 1, 0, 0, 0, 1283, 1281, 1, 0, 0, 0, 1283, 1284, 1, 0, 0, 0, + 1284, 89, 1, 0, 0, 0, 1285, 1283, 1, 0, 0, 0, 1286, 1288, 5, 130, 0, 0, + 1287, 1289, 7, 16, 0, 0, 1288, 1287, 1, 0, 0, 0, 1288, 1289, 
1, 0, 0, 0, + 1289, 1290, 1, 0, 0, 0, 1290, 1295, 3, 100, 50, 0, 1291, 1292, 5, 5, 0, + 0, 1292, 1294, 3, 100, 50, 0, 1293, 1291, 1, 0, 0, 0, 1294, 1297, 1, 0, + 0, 0, 1295, 1293, 1, 0, 0, 0, 1295, 1296, 1, 0, 0, 0, 1296, 1310, 1, 0, + 0, 0, 1297, 1295, 1, 0, 0, 0, 1298, 1308, 5, 75, 0, 0, 1299, 1304, 3, 98, + 49, 0, 1300, 1301, 5, 5, 0, 0, 1301, 1303, 3, 98, 49, 0, 1302, 1300, 1, + 0, 0, 0, 1303, 1306, 1, 0, 0, 0, 1304, 1302, 1, 0, 0, 0, 1304, 1305, 1, + 0, 0, 0, 1305, 1309, 1, 0, 0, 0, 1306, 1304, 1, 0, 0, 0, 1307, 1309, 3, + 88, 44, 0, 1308, 1299, 1, 0, 0, 0, 1308, 1307, 1, 0, 0, 0, 1309, 1311, + 1, 0, 0, 0, 1310, 1298, 1, 0, 0, 0, 1310, 1311, 1, 0, 0, 0, 1311, 1314, + 1, 0, 0, 0, 1312, 1313, 5, 148, 0, 0, 1313, 1315, 3, 64, 32, 0, 1314, 1312, + 1, 0, 0, 0, 1314, 1315, 1, 0, 0, 0, 1315, 1330, 1, 0, 0, 0, 1316, 1317, + 5, 78, 0, 0, 1317, 1318, 5, 40, 0, 0, 1318, 1323, 3, 64, 32, 0, 1319, 1320, + 5, 5, 0, 0, 1320, 1322, 3, 64, 32, 0, 1321, 1319, 1, 0, 0, 0, 1322, 1325, + 1, 0, 0, 0, 1323, 1321, 1, 0, 0, 0, 1323, 1324, 1, 0, 0, 0, 1324, 1328, + 1, 0, 0, 0, 1325, 1323, 1, 0, 0, 0, 1326, 1327, 5, 79, 0, 0, 1327, 1329, + 3, 64, 32, 0, 1328, 1326, 1, 0, 0, 0, 1328, 1329, 1, 0, 0, 0, 1329, 1331, + 1, 0, 0, 0, 1330, 1316, 1, 0, 0, 0, 1330, 1331, 1, 0, 0, 0, 1331, 1346, + 1, 0, 0, 0, 1332, 1333, 5, 174, 0, 0, 1333, 1334, 3, 210, 105, 0, 1334, + 1335, 5, 33, 0, 0, 1335, 1343, 3, 120, 60, 0, 1336, 1337, 5, 5, 0, 0, 1337, + 1338, 3, 210, 105, 0, 1338, 1339, 5, 33, 0, 0, 1339, 1340, 3, 120, 60, + 0, 1340, 1342, 1, 0, 0, 0, 1341, 1336, 1, 0, 0, 0, 1342, 1345, 1, 0, 0, + 0, 1343, 1341, 1, 0, 0, 0, 1343, 1344, 1, 0, 0, 0, 1344, 1347, 1, 0, 0, + 0, 1345, 1343, 1, 0, 0, 0, 1346, 1332, 1, 0, 0, 0, 1346, 1347, 1, 0, 0, + 0, 1347, 1350, 1, 0, 0, 0, 1348, 1350, 3, 72, 36, 0, 1349, 1286, 1, 0, + 0, 0, 1349, 1348, 1, 0, 0, 0, 1350, 91, 1, 0, 0, 0, 1351, 1352, 3, 86, + 43, 0, 1352, 93, 1, 0, 0, 0, 1353, 1355, 3, 134, 67, 0, 1354, 1353, 1, + 0, 0, 0, 1354, 1355, 1, 0, 0, 0, 1355, 1356, 1, 0, 0, 0, 1356, 1358, 3, + 90, 45, 0, 1357, 1359, 3, 136, 68, 0, 1358, 1357, 1, 0, 0, 0, 1358, 1359, + 1, 0, 0, 0, 1359, 1361, 1, 0, 0, 0, 1360, 1362, 3, 138, 69, 0, 1361, 1360, + 1, 0, 0, 0, 1361, 1362, 1, 0, 0, 0, 1362, 95, 1, 0, 0, 0, 1363, 1365, 3, + 134, 67, 0, 1364, 1363, 1, 0, 0, 0, 1364, 1365, 1, 0, 0, 0, 1365, 1366, + 1, 0, 0, 0, 1366, 1376, 3, 90, 45, 0, 1367, 1369, 5, 139, 0, 0, 1368, 1370, + 5, 29, 0, 0, 1369, 1368, 1, 0, 0, 0, 1369, 1370, 1, 0, 0, 0, 1370, 1374, + 1, 0, 0, 0, 1371, 1374, 5, 90, 0, 0, 1372, 1374, 5, 68, 0, 0, 1373, 1367, + 1, 0, 0, 0, 1373, 1371, 1, 0, 0, 0, 1373, 1372, 1, 0, 0, 0, 1374, 1375, + 1, 0, 0, 0, 1375, 1377, 3, 90, 45, 0, 1376, 1373, 1, 0, 0, 0, 1377, 1378, + 1, 0, 0, 0, 1378, 1376, 1, 0, 0, 0, 1378, 1379, 1, 0, 0, 0, 1379, 1381, + 1, 0, 0, 0, 1380, 1382, 3, 136, 68, 0, 1381, 1380, 1, 0, 0, 0, 1381, 1382, + 1, 0, 0, 0, 1382, 1384, 1, 0, 0, 0, 1383, 1385, 3, 138, 69, 0, 1384, 1383, + 1, 0, 0, 0, 1384, 1385, 1, 0, 0, 0, 1385, 97, 1, 0, 0, 0, 1386, 1387, 3, + 182, 91, 0, 1387, 1388, 5, 2, 0, 0, 1388, 1390, 1, 0, 0, 0, 1389, 1386, + 1, 0, 0, 0, 1389, 1390, 1, 0, 0, 0, 1390, 1391, 1, 0, 0, 0, 1391, 1396, + 3, 184, 92, 0, 1392, 1394, 5, 33, 0, 0, 1393, 1392, 1, 0, 0, 0, 1393, 1394, + 1, 0, 0, 0, 1394, 1395, 1, 0, 0, 0, 1395, 1397, 3, 206, 103, 0, 1396, 1393, + 1, 0, 0, 0, 1396, 1397, 1, 0, 0, 0, 1397, 1403, 1, 0, 0, 0, 1398, 1399, + 5, 85, 0, 0, 1399, 1400, 5, 40, 0, 0, 1400, 1404, 3, 194, 97, 0, 1401, + 1402, 5, 102, 0, 0, 1402, 1404, 5, 85, 0, 0, 1403, 1398, 1, 0, 0, 0, 1403, + 1401, 1, 0, 0, 
0, 1403, 1404, 1, 0, 0, 0, 1404, 1451, 1, 0, 0, 0, 1405, + 1406, 3, 182, 91, 0, 1406, 1407, 5, 2, 0, 0, 1407, 1409, 1, 0, 0, 0, 1408, + 1405, 1, 0, 0, 0, 1408, 1409, 1, 0, 0, 0, 1409, 1410, 1, 0, 0, 0, 1410, + 1411, 3, 222, 111, 0, 1411, 1412, 5, 3, 0, 0, 1412, 1417, 3, 64, 32, 0, + 1413, 1414, 5, 5, 0, 0, 1414, 1416, 3, 64, 32, 0, 1415, 1413, 1, 0, 0, + 0, 1416, 1419, 1, 0, 0, 0, 1417, 1415, 1, 0, 0, 0, 1417, 1418, 1, 0, 0, + 0, 1418, 1420, 1, 0, 0, 0, 1419, 1417, 1, 0, 0, 0, 1420, 1425, 5, 4, 0, + 0, 1421, 1423, 5, 33, 0, 0, 1422, 1421, 1, 0, 0, 0, 1422, 1423, 1, 0, 0, + 0, 1423, 1424, 1, 0, 0, 0, 1424, 1426, 3, 206, 103, 0, 1425, 1422, 1, 0, + 0, 0, 1425, 1426, 1, 0, 0, 0, 1426, 1451, 1, 0, 0, 0, 1427, 1437, 5, 3, + 0, 0, 1428, 1433, 3, 98, 49, 0, 1429, 1430, 5, 5, 0, 0, 1430, 1432, 3, + 98, 49, 0, 1431, 1429, 1, 0, 0, 0, 1432, 1435, 1, 0, 0, 0, 1433, 1431, + 1, 0, 0, 0, 1433, 1434, 1, 0, 0, 0, 1434, 1438, 1, 0, 0, 0, 1435, 1433, + 1, 0, 0, 0, 1436, 1438, 3, 88, 44, 0, 1437, 1428, 1, 0, 0, 0, 1437, 1436, + 1, 0, 0, 0, 1438, 1439, 1, 0, 0, 0, 1439, 1440, 5, 4, 0, 0, 1440, 1451, + 1, 0, 0, 0, 1441, 1442, 5, 3, 0, 0, 1442, 1443, 3, 86, 43, 0, 1443, 1448, + 5, 4, 0, 0, 1444, 1446, 5, 33, 0, 0, 1445, 1444, 1, 0, 0, 0, 1445, 1446, + 1, 0, 0, 0, 1446, 1447, 1, 0, 0, 0, 1447, 1449, 3, 206, 103, 0, 1448, 1445, + 1, 0, 0, 0, 1448, 1449, 1, 0, 0, 0, 1449, 1451, 1, 0, 0, 0, 1450, 1389, + 1, 0, 0, 0, 1450, 1408, 1, 0, 0, 0, 1450, 1427, 1, 0, 0, 0, 1450, 1441, + 1, 0, 0, 0, 1451, 99, 1, 0, 0, 0, 1452, 1465, 5, 7, 0, 0, 1453, 1454, 3, + 184, 92, 0, 1454, 1455, 5, 2, 0, 0, 1455, 1456, 5, 7, 0, 0, 1456, 1465, + 1, 0, 0, 0, 1457, 1462, 3, 64, 32, 0, 1458, 1460, 5, 33, 0, 0, 1459, 1458, + 1, 0, 0, 0, 1459, 1460, 1, 0, 0, 0, 1460, 1461, 1, 0, 0, 0, 1461, 1463, + 3, 174, 87, 0, 1462, 1459, 1, 0, 0, 0, 1462, 1463, 1, 0, 0, 0, 1463, 1465, + 1, 0, 0, 0, 1464, 1452, 1, 0, 0, 0, 1464, 1453, 1, 0, 0, 0, 1464, 1457, + 1, 0, 0, 0, 1465, 101, 1, 0, 0, 0, 1466, 1480, 5, 5, 0, 0, 1467, 1469, + 5, 100, 0, 0, 1468, 1467, 1, 0, 0, 0, 1468, 1469, 1, 0, 0, 0, 1469, 1476, + 1, 0, 0, 0, 1470, 1472, 5, 96, 0, 0, 1471, 1473, 5, 110, 0, 0, 1472, 1471, + 1, 0, 0, 0, 1472, 1473, 1, 0, 0, 0, 1473, 1477, 1, 0, 0, 0, 1474, 1477, + 5, 87, 0, 0, 1475, 1477, 5, 51, 0, 0, 1476, 1470, 1, 0, 0, 0, 1476, 1474, + 1, 0, 0, 0, 1476, 1475, 1, 0, 0, 0, 1476, 1477, 1, 0, 0, 0, 1477, 1478, + 1, 0, 0, 0, 1478, 1480, 5, 94, 0, 0, 1479, 1466, 1, 0, 0, 0, 1479, 1468, + 1, 0, 0, 0, 1480, 103, 1, 0, 0, 0, 1481, 1482, 5, 107, 0, 0, 1482, 1496, + 3, 64, 32, 0, 1483, 1484, 5, 142, 0, 0, 1484, 1485, 5, 3, 0, 0, 1485, 1490, + 3, 188, 94, 0, 1486, 1487, 5, 5, 0, 0, 1487, 1489, 3, 188, 94, 0, 1488, + 1486, 1, 0, 0, 0, 1489, 1492, 1, 0, 0, 0, 1490, 1488, 1, 0, 0, 0, 1490, + 1491, 1, 0, 0, 0, 1491, 1493, 1, 0, 0, 0, 1492, 1490, 1, 0, 0, 0, 1493, + 1494, 5, 4, 0, 0, 1494, 1496, 1, 0, 0, 0, 1495, 1481, 1, 0, 0, 0, 1495, + 1483, 1, 0, 0, 0, 1496, 105, 1, 0, 0, 0, 1497, 1499, 5, 139, 0, 0, 1498, + 1500, 5, 29, 0, 0, 1499, 1498, 1, 0, 0, 0, 1499, 1500, 1, 0, 0, 0, 1500, + 1504, 1, 0, 0, 0, 1501, 1504, 5, 90, 0, 0, 1502, 1504, 5, 68, 0, 0, 1503, + 1497, 1, 0, 0, 0, 1503, 1501, 1, 0, 0, 0, 1503, 1502, 1, 0, 0, 0, 1504, + 107, 1, 0, 0, 0, 1505, 1507, 3, 48, 24, 0, 1506, 1505, 1, 0, 0, 0, 1506, + 1507, 1, 0, 0, 0, 1507, 1508, 1, 0, 0, 0, 1508, 1511, 5, 141, 0, 0, 1509, + 1510, 5, 108, 0, 0, 1510, 1512, 7, 8, 0, 0, 1511, 1509, 1, 0, 0, 0, 1511, + 1512, 1, 0, 0, 0, 1512, 1513, 1, 0, 0, 0, 1513, 1514, 3, 114, 57, 0, 1514, + 1517, 5, 131, 0, 0, 1515, 1518, 3, 188, 94, 0, 
1516, 1518, 3, 110, 55, + 0, 1517, 1515, 1, 0, 0, 0, 1517, 1516, 1, 0, 0, 0, 1518, 1519, 1, 0, 0, + 0, 1519, 1520, 5, 6, 0, 0, 1520, 1531, 3, 64, 32, 0, 1521, 1524, 5, 5, + 0, 0, 1522, 1525, 3, 188, 94, 0, 1523, 1525, 3, 110, 55, 0, 1524, 1522, + 1, 0, 0, 0, 1524, 1523, 1, 0, 0, 0, 1525, 1526, 1, 0, 0, 0, 1526, 1527, + 5, 6, 0, 0, 1527, 1528, 3, 64, 32, 0, 1528, 1530, 1, 0, 0, 0, 1529, 1521, + 1, 0, 0, 0, 1530, 1533, 1, 0, 0, 0, 1531, 1529, 1, 0, 0, 0, 1531, 1532, + 1, 0, 0, 0, 1532, 1546, 1, 0, 0, 0, 1533, 1531, 1, 0, 0, 0, 1534, 1544, + 5, 75, 0, 0, 1535, 1540, 3, 98, 49, 0, 1536, 1537, 5, 5, 0, 0, 1537, 1539, + 3, 98, 49, 0, 1538, 1536, 1, 0, 0, 0, 1539, 1542, 1, 0, 0, 0, 1540, 1538, + 1, 0, 0, 0, 1540, 1541, 1, 0, 0, 0, 1541, 1545, 1, 0, 0, 0, 1542, 1540, + 1, 0, 0, 0, 1543, 1545, 3, 88, 44, 0, 1544, 1535, 1, 0, 0, 0, 1544, 1543, + 1, 0, 0, 0, 1545, 1547, 1, 0, 0, 0, 1546, 1534, 1, 0, 0, 0, 1546, 1547, + 1, 0, 0, 0, 1547, 1550, 1, 0, 0, 0, 1548, 1549, 5, 148, 0, 0, 1549, 1551, + 3, 64, 32, 0, 1550, 1548, 1, 0, 0, 0, 1550, 1551, 1, 0, 0, 0, 1551, 1553, + 1, 0, 0, 0, 1552, 1554, 3, 76, 38, 0, 1553, 1552, 1, 0, 0, 0, 1553, 1554, + 1, 0, 0, 0, 1554, 109, 1, 0, 0, 0, 1555, 1556, 5, 3, 0, 0, 1556, 1561, + 3, 188, 94, 0, 1557, 1558, 5, 5, 0, 0, 1558, 1560, 3, 188, 94, 0, 1559, + 1557, 1, 0, 0, 0, 1560, 1563, 1, 0, 0, 0, 1561, 1559, 1, 0, 0, 0, 1561, + 1562, 1, 0, 0, 0, 1562, 1564, 1, 0, 0, 0, 1563, 1561, 1, 0, 0, 0, 1564, + 1565, 5, 4, 0, 0, 1565, 111, 1, 0, 0, 0, 1566, 1568, 3, 48, 24, 0, 1567, + 1566, 1, 0, 0, 0, 1567, 1568, 1, 0, 0, 0, 1568, 1569, 1, 0, 0, 0, 1569, + 1572, 5, 141, 0, 0, 1570, 1571, 5, 108, 0, 0, 1571, 1573, 7, 8, 0, 0, 1572, + 1570, 1, 0, 0, 0, 1572, 1573, 1, 0, 0, 0, 1573, 1574, 1, 0, 0, 0, 1574, + 1575, 3, 114, 57, 0, 1575, 1578, 5, 131, 0, 0, 1576, 1579, 3, 188, 94, + 0, 1577, 1579, 3, 110, 55, 0, 1578, 1576, 1, 0, 0, 0, 1578, 1577, 1, 0, + 0, 0, 1579, 1580, 1, 0, 0, 0, 1580, 1581, 5, 6, 0, 0, 1581, 1592, 3, 64, + 32, 0, 1582, 1585, 5, 5, 0, 0, 1583, 1586, 3, 188, 94, 0, 1584, 1586, 3, + 110, 55, 0, 1585, 1583, 1, 0, 0, 0, 1585, 1584, 1, 0, 0, 0, 1586, 1587, + 1, 0, 0, 0, 1587, 1588, 5, 6, 0, 0, 1588, 1589, 3, 64, 32, 0, 1589, 1591, + 1, 0, 0, 0, 1590, 1582, 1, 0, 0, 0, 1591, 1594, 1, 0, 0, 0, 1592, 1590, + 1, 0, 0, 0, 1592, 1593, 1, 0, 0, 0, 1593, 1597, 1, 0, 0, 0, 1594, 1592, + 1, 0, 0, 0, 1595, 1596, 5, 148, 0, 0, 1596, 1598, 3, 64, 32, 0, 1597, 1595, + 1, 0, 0, 0, 1597, 1598, 1, 0, 0, 0, 1598, 1600, 1, 0, 0, 0, 1599, 1601, + 3, 76, 38, 0, 1600, 1599, 1, 0, 0, 0, 1600, 1601, 1, 0, 0, 0, 1601, 1606, + 1, 0, 0, 0, 1602, 1604, 3, 136, 68, 0, 1603, 1602, 1, 0, 0, 0, 1603, 1604, + 1, 0, 0, 0, 1604, 1605, 1, 0, 0, 0, 1605, 1607, 3, 138, 69, 0, 1606, 1603, + 1, 0, 0, 0, 1606, 1607, 1, 0, 0, 0, 1607, 113, 1, 0, 0, 0, 1608, 1609, + 3, 182, 91, 0, 1609, 1610, 5, 2, 0, 0, 1610, 1612, 1, 0, 0, 0, 1611, 1608, + 1, 0, 0, 0, 1611, 1612, 1, 0, 0, 0, 1612, 1613, 1, 0, 0, 0, 1613, 1616, + 3, 184, 92, 0, 1614, 1615, 5, 33, 0, 0, 1615, 1617, 3, 212, 106, 0, 1616, + 1614, 1, 0, 0, 0, 1616, 1617, 1, 0, 0, 0, 1617, 1623, 1, 0, 0, 0, 1618, + 1619, 5, 85, 0, 0, 1619, 1620, 5, 40, 0, 0, 1620, 1624, 3, 194, 97, 0, + 1621, 1622, 5, 102, 0, 0, 1622, 1624, 5, 85, 0, 0, 1623, 1618, 1, 0, 0, + 0, 1623, 1621, 1, 0, 0, 0, 1623, 1624, 1, 0, 0, 0, 1624, 115, 1, 0, 0, + 0, 1625, 1627, 5, 143, 0, 0, 1626, 1628, 3, 182, 91, 0, 1627, 1626, 1, + 0, 0, 0, 1627, 1628, 1, 0, 0, 0, 1628, 1631, 1, 0, 0, 0, 1629, 1630, 5, + 91, 0, 0, 1630, 1632, 3, 214, 107, 0, 1631, 1629, 1, 0, 0, 0, 1631, 1632, + 1, 0, 0, 0, 
1632, 117, 1, 0, 0, 0, 1633, 1634, 5, 178, 0, 0, 1634, 1635, + 5, 3, 0, 0, 1635, 1636, 5, 148, 0, 0, 1636, 1637, 3, 64, 32, 0, 1637, 1638, + 5, 4, 0, 0, 1638, 119, 1, 0, 0, 0, 1639, 1641, 5, 3, 0, 0, 1640, 1642, + 3, 216, 108, 0, 1641, 1640, 1, 0, 0, 0, 1641, 1642, 1, 0, 0, 0, 1642, 1653, + 1, 0, 0, 0, 1643, 1644, 5, 153, 0, 0, 1644, 1645, 5, 40, 0, 0, 1645, 1650, + 3, 64, 32, 0, 1646, 1647, 5, 5, 0, 0, 1647, 1649, 3, 64, 32, 0, 1648, 1646, + 1, 0, 0, 0, 1649, 1652, 1, 0, 0, 0, 1650, 1648, 1, 0, 0, 0, 1650, 1651, + 1, 0, 0, 0, 1651, 1654, 1, 0, 0, 0, 1652, 1650, 1, 0, 0, 0, 1653, 1643, + 1, 0, 0, 0, 1653, 1654, 1, 0, 0, 0, 1654, 1655, 1, 0, 0, 0, 1655, 1656, + 5, 109, 0, 0, 1656, 1657, 5, 40, 0, 0, 1657, 1662, 3, 140, 70, 0, 1658, + 1659, 5, 5, 0, 0, 1659, 1661, 3, 140, 70, 0, 1660, 1658, 1, 0, 0, 0, 1661, + 1664, 1, 0, 0, 0, 1662, 1660, 1, 0, 0, 0, 1662, 1663, 1, 0, 0, 0, 1663, + 1666, 1, 0, 0, 0, 1664, 1662, 1, 0, 0, 0, 1665, 1667, 3, 124, 62, 0, 1666, + 1665, 1, 0, 0, 0, 1666, 1667, 1, 0, 0, 0, 1667, 1668, 1, 0, 0, 0, 1668, + 1669, 5, 4, 0, 0, 1669, 121, 1, 0, 0, 0, 1670, 1704, 5, 152, 0, 0, 1671, + 1705, 3, 210, 105, 0, 1672, 1674, 5, 3, 0, 0, 1673, 1675, 3, 216, 108, + 0, 1674, 1673, 1, 0, 0, 0, 1674, 1675, 1, 0, 0, 0, 1675, 1686, 1, 0, 0, + 0, 1676, 1677, 5, 153, 0, 0, 1677, 1678, 5, 40, 0, 0, 1678, 1683, 3, 64, + 32, 0, 1679, 1680, 5, 5, 0, 0, 1680, 1682, 3, 64, 32, 0, 1681, 1679, 1, + 0, 0, 0, 1682, 1685, 1, 0, 0, 0, 1683, 1681, 1, 0, 0, 0, 1683, 1684, 1, + 0, 0, 0, 1684, 1687, 1, 0, 0, 0, 1685, 1683, 1, 0, 0, 0, 1686, 1676, 1, + 0, 0, 0, 1686, 1687, 1, 0, 0, 0, 1687, 1698, 1, 0, 0, 0, 1688, 1689, 5, + 109, 0, 0, 1689, 1690, 5, 40, 0, 0, 1690, 1695, 3, 140, 70, 0, 1691, 1692, + 5, 5, 0, 0, 1692, 1694, 3, 140, 70, 0, 1693, 1691, 1, 0, 0, 0, 1694, 1697, + 1, 0, 0, 0, 1695, 1693, 1, 0, 0, 0, 1695, 1696, 1, 0, 0, 0, 1696, 1699, + 1, 0, 0, 0, 1697, 1695, 1, 0, 0, 0, 1698, 1688, 1, 0, 0, 0, 1698, 1699, + 1, 0, 0, 0, 1699, 1701, 1, 0, 0, 0, 1700, 1702, 3, 124, 62, 0, 1701, 1700, + 1, 0, 0, 0, 1701, 1702, 1, 0, 0, 0, 1702, 1703, 1, 0, 0, 0, 1703, 1705, + 5, 4, 0, 0, 1704, 1671, 1, 0, 0, 0, 1704, 1672, 1, 0, 0, 0, 1705, 123, + 1, 0, 0, 0, 1706, 1716, 3, 126, 63, 0, 1707, 1714, 5, 180, 0, 0, 1708, + 1709, 5, 101, 0, 0, 1709, 1715, 5, 182, 0, 0, 1710, 1711, 5, 157, 0, 0, + 1711, 1715, 5, 127, 0, 0, 1712, 1715, 5, 78, 0, 0, 1713, 1715, 5, 181, + 0, 0, 1714, 1708, 1, 0, 0, 0, 1714, 1710, 1, 0, 0, 0, 1714, 1712, 1, 0, + 0, 0, 1714, 1713, 1, 0, 0, 0, 1715, 1717, 1, 0, 0, 0, 1716, 1707, 1, 0, + 0, 0, 1716, 1717, 1, 0, 0, 0, 1717, 125, 1, 0, 0, 0, 1718, 1725, 7, 17, + 0, 0, 1719, 1726, 3, 148, 74, 0, 1720, 1721, 5, 39, 0, 0, 1721, 1722, 3, + 144, 72, 0, 1722, 1723, 5, 32, 0, 0, 1723, 1724, 3, 146, 73, 0, 1724, 1726, + 1, 0, 0, 0, 1725, 1719, 1, 0, 0, 0, 1725, 1720, 1, 0, 0, 0, 1726, 127, + 1, 0, 0, 0, 1727, 1728, 3, 218, 109, 0, 1728, 1738, 5, 3, 0, 0, 1729, 1734, + 3, 64, 32, 0, 1730, 1731, 5, 5, 0, 0, 1731, 1733, 3, 64, 32, 0, 1732, 1730, + 1, 0, 0, 0, 1733, 1736, 1, 0, 0, 0, 1734, 1732, 1, 0, 0, 0, 1734, 1735, + 1, 0, 0, 0, 1735, 1739, 1, 0, 0, 0, 1736, 1734, 1, 0, 0, 0, 1737, 1739, + 5, 7, 0, 0, 1738, 1729, 1, 0, 0, 0, 1738, 1737, 1, 0, 0, 0, 1739, 1740, + 1, 0, 0, 0, 1740, 1741, 5, 4, 0, 0, 1741, 129, 1, 0, 0, 0, 1742, 1743, + 3, 220, 110, 0, 1743, 1756, 5, 3, 0, 0, 1744, 1746, 5, 62, 0, 0, 1745, + 1744, 1, 0, 0, 0, 1745, 1746, 1, 0, 0, 0, 1746, 1747, 1, 0, 0, 0, 1747, + 1752, 3, 64, 32, 0, 1748, 1749, 5, 5, 0, 0, 1749, 1751, 3, 64, 32, 0, 1750, + 1748, 1, 0, 0, 0, 1751, 1754, 1, 0, 0, 0, 
1752, 1750, 1, 0, 0, 0, 1752, + 1753, 1, 0, 0, 0, 1753, 1757, 1, 0, 0, 0, 1754, 1752, 1, 0, 0, 0, 1755, + 1757, 5, 7, 0, 0, 1756, 1745, 1, 0, 0, 0, 1756, 1755, 1, 0, 0, 0, 1756, + 1757, 1, 0, 0, 0, 1757, 1758, 1, 0, 0, 0, 1758, 1760, 5, 4, 0, 0, 1759, + 1761, 3, 118, 59, 0, 1760, 1759, 1, 0, 0, 0, 1760, 1761, 1, 0, 0, 0, 1761, + 131, 1, 0, 0, 0, 1762, 1763, 3, 150, 75, 0, 1763, 1773, 5, 3, 0, 0, 1764, + 1769, 3, 64, 32, 0, 1765, 1766, 5, 5, 0, 0, 1766, 1768, 3, 64, 32, 0, 1767, + 1765, 1, 0, 0, 0, 1768, 1771, 1, 0, 0, 0, 1769, 1767, 1, 0, 0, 0, 1769, + 1770, 1, 0, 0, 0, 1770, 1774, 1, 0, 0, 0, 1771, 1769, 1, 0, 0, 0, 1772, + 1774, 5, 7, 0, 0, 1773, 1764, 1, 0, 0, 0, 1773, 1772, 1, 0, 0, 0, 1773, + 1774, 1, 0, 0, 0, 1774, 1775, 1, 0, 0, 0, 1775, 1777, 5, 4, 0, 0, 1776, + 1778, 3, 118, 59, 0, 1777, 1776, 1, 0, 0, 0, 1777, 1778, 1, 0, 0, 0, 1778, + 1779, 1, 0, 0, 0, 1779, 1782, 5, 152, 0, 0, 1780, 1783, 3, 120, 60, 0, + 1781, 1783, 3, 210, 105, 0, 1782, 1780, 1, 0, 0, 0, 1782, 1781, 1, 0, 0, + 0, 1783, 133, 1, 0, 0, 0, 1784, 1786, 5, 149, 0, 0, 1785, 1787, 5, 116, + 0, 0, 1786, 1785, 1, 0, 0, 0, 1786, 1787, 1, 0, 0, 0, 1787, 1788, 1, 0, + 0, 0, 1788, 1793, 3, 54, 27, 0, 1789, 1790, 5, 5, 0, 0, 1790, 1792, 3, + 54, 27, 0, 1791, 1789, 1, 0, 0, 0, 1792, 1795, 1, 0, 0, 0, 1793, 1791, + 1, 0, 0, 0, 1793, 1794, 1, 0, 0, 0, 1794, 135, 1, 0, 0, 0, 1795, 1793, + 1, 0, 0, 0, 1796, 1797, 5, 109, 0, 0, 1797, 1798, 5, 40, 0, 0, 1798, 1803, + 3, 140, 70, 0, 1799, 1800, 5, 5, 0, 0, 1800, 1802, 3, 140, 70, 0, 1801, + 1799, 1, 0, 0, 0, 1802, 1805, 1, 0, 0, 0, 1803, 1801, 1, 0, 0, 0, 1803, + 1804, 1, 0, 0, 0, 1804, 137, 1, 0, 0, 0, 1805, 1803, 1, 0, 0, 0, 1806, + 1807, 5, 98, 0, 0, 1807, 1810, 3, 64, 32, 0, 1808, 1809, 7, 18, 0, 0, 1809, + 1811, 3, 64, 32, 0, 1810, 1808, 1, 0, 0, 0, 1810, 1811, 1, 0, 0, 0, 1811, + 139, 1, 0, 0, 0, 1812, 1815, 3, 64, 32, 0, 1813, 1814, 5, 45, 0, 0, 1814, + 1816, 3, 190, 95, 0, 1815, 1813, 1, 0, 0, 0, 1815, 1816, 1, 0, 0, 0, 1816, + 1818, 1, 0, 0, 0, 1817, 1819, 3, 142, 71, 0, 1818, 1817, 1, 0, 0, 0, 1818, + 1819, 1, 0, 0, 0, 1819, 1822, 1, 0, 0, 0, 1820, 1821, 5, 175, 0, 0, 1821, + 1823, 7, 19, 0, 0, 1822, 1820, 1, 0, 0, 0, 1822, 1823, 1, 0, 0, 0, 1823, + 141, 1, 0, 0, 0, 1824, 1825, 7, 20, 0, 0, 1825, 143, 1, 0, 0, 0, 1826, + 1827, 3, 64, 32, 0, 1827, 1828, 5, 155, 0, 0, 1828, 1837, 1, 0, 0, 0, 1829, + 1830, 3, 64, 32, 0, 1830, 1831, 5, 158, 0, 0, 1831, 1837, 1, 0, 0, 0, 1832, + 1833, 5, 157, 0, 0, 1833, 1837, 5, 127, 0, 0, 1834, 1835, 5, 156, 0, 0, + 1835, 1837, 5, 155, 0, 0, 1836, 1826, 1, 0, 0, 0, 1836, 1829, 1, 0, 0, + 0, 1836, 1832, 1, 0, 0, 0, 1836, 1834, 1, 0, 0, 0, 1837, 145, 1, 0, 0, + 0, 1838, 1839, 3, 64, 32, 0, 1839, 1840, 5, 155, 0, 0, 1840, 1849, 1, 0, + 0, 0, 1841, 1842, 3, 64, 32, 0, 1842, 1843, 5, 158, 0, 0, 1843, 1849, 1, + 0, 0, 0, 1844, 1845, 5, 157, 0, 0, 1845, 1849, 5, 127, 0, 0, 1846, 1847, + 5, 156, 0, 0, 1847, 1849, 5, 158, 0, 0, 1848, 1838, 1, 0, 0, 0, 1848, 1841, + 1, 0, 0, 0, 1848, 1844, 1, 0, 0, 0, 1848, 1846, 1, 0, 0, 0, 1849, 147, + 1, 0, 0, 0, 1850, 1851, 3, 64, 32, 0, 1851, 1852, 5, 155, 0, 0, 1852, 1858, + 1, 0, 0, 0, 1853, 1854, 5, 156, 0, 0, 1854, 1858, 5, 155, 0, 0, 1855, 1856, + 5, 157, 0, 0, 1856, 1858, 5, 127, 0, 0, 1857, 1850, 1, 0, 0, 0, 1857, 1853, + 1, 0, 0, 0, 1857, 1855, 1, 0, 0, 0, 1858, 149, 1, 0, 0, 0, 1859, 1860, + 7, 21, 0, 0, 1860, 1861, 5, 3, 0, 0, 1861, 1862, 3, 64, 32, 0, 1862, 1863, + 5, 4, 0, 0, 1863, 1864, 5, 152, 0, 0, 1864, 1866, 5, 3, 0, 0, 1865, 1867, + 3, 156, 78, 0, 1866, 1865, 1, 0, 0, 0, 1866, 1867, 1, 0, 0, 0, 
1867, 1868, + 1, 0, 0, 0, 1868, 1870, 3, 160, 80, 0, 1869, 1871, 3, 126, 63, 0, 1870, + 1869, 1, 0, 0, 0, 1870, 1871, 1, 0, 0, 0, 1871, 1872, 1, 0, 0, 0, 1872, + 1873, 5, 4, 0, 0, 1873, 1945, 1, 0, 0, 0, 1874, 1875, 7, 22, 0, 0, 1875, + 1876, 5, 3, 0, 0, 1876, 1877, 5, 4, 0, 0, 1877, 1878, 5, 152, 0, 0, 1878, + 1880, 5, 3, 0, 0, 1879, 1881, 3, 156, 78, 0, 1880, 1879, 1, 0, 0, 0, 1880, + 1881, 1, 0, 0, 0, 1881, 1883, 1, 0, 0, 0, 1882, 1884, 3, 158, 79, 0, 1883, + 1882, 1, 0, 0, 0, 1883, 1884, 1, 0, 0, 0, 1884, 1885, 1, 0, 0, 0, 1885, + 1945, 5, 4, 0, 0, 1886, 1887, 7, 23, 0, 0, 1887, 1888, 5, 3, 0, 0, 1888, + 1889, 5, 4, 0, 0, 1889, 1890, 5, 152, 0, 0, 1890, 1892, 5, 3, 0, 0, 1891, + 1893, 3, 156, 78, 0, 1892, 1891, 1, 0, 0, 0, 1892, 1893, 1, 0, 0, 0, 1893, + 1894, 1, 0, 0, 0, 1894, 1895, 3, 160, 80, 0, 1895, 1896, 5, 4, 0, 0, 1896, + 1945, 1, 0, 0, 0, 1897, 1898, 7, 24, 0, 0, 1898, 1899, 5, 3, 0, 0, 1899, + 1901, 3, 64, 32, 0, 1900, 1902, 3, 152, 76, 0, 1901, 1900, 1, 0, 0, 0, + 1901, 1902, 1, 0, 0, 0, 1902, 1904, 1, 0, 0, 0, 1903, 1905, 3, 154, 77, + 0, 1904, 1903, 1, 0, 0, 0, 1904, 1905, 1, 0, 0, 0, 1905, 1906, 1, 0, 0, + 0, 1906, 1907, 5, 4, 0, 0, 1907, 1908, 5, 152, 0, 0, 1908, 1910, 5, 3, + 0, 0, 1909, 1911, 3, 156, 78, 0, 1910, 1909, 1, 0, 0, 0, 1910, 1911, 1, + 0, 0, 0, 1911, 1912, 1, 0, 0, 0, 1912, 1913, 3, 160, 80, 0, 1913, 1914, + 5, 4, 0, 0, 1914, 1945, 1, 0, 0, 0, 1915, 1916, 5, 164, 0, 0, 1916, 1917, + 5, 3, 0, 0, 1917, 1918, 3, 64, 32, 0, 1918, 1919, 5, 5, 0, 0, 1919, 1920, + 3, 34, 17, 0, 1920, 1921, 5, 4, 0, 0, 1921, 1922, 5, 152, 0, 0, 1922, 1924, + 5, 3, 0, 0, 1923, 1925, 3, 156, 78, 0, 1924, 1923, 1, 0, 0, 0, 1924, 1925, + 1, 0, 0, 0, 1925, 1926, 1, 0, 0, 0, 1926, 1928, 3, 160, 80, 0, 1927, 1929, + 3, 126, 63, 0, 1928, 1927, 1, 0, 0, 0, 1928, 1929, 1, 0, 0, 0, 1929, 1930, + 1, 0, 0, 0, 1930, 1931, 5, 4, 0, 0, 1931, 1945, 1, 0, 0, 0, 1932, 1933, + 5, 165, 0, 0, 1933, 1934, 5, 3, 0, 0, 1934, 1935, 3, 64, 32, 0, 1935, 1936, + 5, 4, 0, 0, 1936, 1937, 5, 152, 0, 0, 1937, 1939, 5, 3, 0, 0, 1938, 1940, + 3, 156, 78, 0, 1939, 1938, 1, 0, 0, 0, 1939, 1940, 1, 0, 0, 0, 1940, 1941, + 1, 0, 0, 0, 1941, 1942, 3, 160, 80, 0, 1942, 1943, 5, 4, 0, 0, 1943, 1945, + 1, 0, 0, 0, 1944, 1859, 1, 0, 0, 0, 1944, 1874, 1, 0, 0, 0, 1944, 1886, + 1, 0, 0, 0, 1944, 1897, 1, 0, 0, 0, 1944, 1915, 1, 0, 0, 0, 1944, 1932, + 1, 0, 0, 0, 1945, 151, 1, 0, 0, 0, 1946, 1947, 5, 5, 0, 0, 1947, 1948, + 3, 34, 17, 0, 1948, 153, 1, 0, 0, 0, 1949, 1950, 5, 5, 0, 0, 1950, 1951, + 3, 34, 17, 0, 1951, 155, 1, 0, 0, 0, 1952, 1953, 5, 153, 0, 0, 1953, 1955, + 5, 40, 0, 0, 1954, 1956, 3, 64, 32, 0, 1955, 1954, 1, 0, 0, 0, 1956, 1957, + 1, 0, 0, 0, 1957, 1955, 1, 0, 0, 0, 1957, 1958, 1, 0, 0, 0, 1958, 157, + 1, 0, 0, 0, 1959, 1960, 5, 109, 0, 0, 1960, 1962, 5, 40, 0, 0, 1961, 1963, + 3, 64, 32, 0, 1962, 1961, 1, 0, 0, 0, 1963, 1964, 1, 0, 0, 0, 1964, 1962, + 1, 0, 0, 0, 1964, 1965, 1, 0, 0, 0, 1965, 159, 1, 0, 0, 0, 1966, 1967, + 5, 109, 0, 0, 1967, 1968, 5, 40, 0, 0, 1968, 1969, 3, 162, 81, 0, 1969, + 161, 1, 0, 0, 0, 1970, 1972, 3, 64, 32, 0, 1971, 1973, 3, 142, 71, 0, 1972, + 1971, 1, 0, 0, 0, 1972, 1973, 1, 0, 0, 0, 1973, 1981, 1, 0, 0, 0, 1974, + 1975, 5, 5, 0, 0, 1975, 1977, 3, 64, 32, 0, 1976, 1978, 3, 142, 71, 0, + 1977, 1976, 1, 0, 0, 0, 1977, 1978, 1, 0, 0, 0, 1978, 1980, 1, 0, 0, 0, + 1979, 1974, 1, 0, 0, 0, 1980, 1983, 1, 0, 0, 0, 1981, 1979, 1, 0, 0, 0, + 1981, 1982, 1, 0, 0, 0, 1982, 163, 1, 0, 0, 0, 1983, 1981, 1, 0, 0, 0, + 1984, 1985, 3, 86, 43, 0, 1985, 165, 1, 0, 0, 0, 1986, 1987, 3, 86, 43, + 0, 
1987, 167, 1, 0, 0, 0, 1988, 1989, 7, 25, 0, 0, 1989, 169, 1, 0, 0, + 0, 1990, 1991, 5, 188, 0, 0, 1991, 171, 1, 0, 0, 0, 1992, 1995, 3, 64, + 32, 0, 1993, 1995, 3, 28, 14, 0, 1994, 1992, 1, 0, 0, 0, 1994, 1993, 1, + 0, 0, 0, 1995, 173, 1, 0, 0, 0, 1996, 1997, 7, 26, 0, 0, 1997, 175, 1, + 0, 0, 0, 1998, 1999, 7, 27, 0, 0, 1999, 177, 1, 0, 0, 0, 2000, 2001, 3, + 224, 112, 0, 2001, 179, 1, 0, 0, 0, 2002, 2003, 3, 224, 112, 0, 2003, 181, + 1, 0, 0, 0, 2004, 2005, 3, 224, 112, 0, 2005, 183, 1, 0, 0, 0, 2006, 2007, + 3, 224, 112, 0, 2007, 185, 1, 0, 0, 0, 2008, 2009, 3, 224, 112, 0, 2009, + 187, 1, 0, 0, 0, 2010, 2011, 3, 224, 112, 0, 2011, 189, 1, 0, 0, 0, 2012, + 2013, 3, 224, 112, 0, 2013, 191, 1, 0, 0, 0, 2014, 2015, 3, 224, 112, 0, + 2015, 193, 1, 0, 0, 0, 2016, 2017, 3, 224, 112, 0, 2017, 195, 1, 0, 0, + 0, 2018, 2019, 3, 224, 112, 0, 2019, 197, 1, 0, 0, 0, 2020, 2021, 3, 224, + 112, 0, 2021, 199, 1, 0, 0, 0, 2022, 2023, 3, 224, 112, 0, 2023, 201, 1, + 0, 0, 0, 2024, 2025, 3, 224, 112, 0, 2025, 203, 1, 0, 0, 0, 2026, 2027, + 3, 224, 112, 0, 2027, 205, 1, 0, 0, 0, 2028, 2029, 3, 224, 112, 0, 2029, + 207, 1, 0, 0, 0, 2030, 2031, 3, 224, 112, 0, 2031, 209, 1, 0, 0, 0, 2032, + 2033, 3, 224, 112, 0, 2033, 211, 1, 0, 0, 0, 2034, 2035, 3, 224, 112, 0, + 2035, 213, 1, 0, 0, 0, 2036, 2037, 3, 224, 112, 0, 2037, 215, 1, 0, 0, + 0, 2038, 2039, 3, 224, 112, 0, 2039, 217, 1, 0, 0, 0, 2040, 2041, 3, 224, + 112, 0, 2041, 219, 1, 0, 0, 0, 2042, 2043, 3, 224, 112, 0, 2043, 221, 1, + 0, 0, 0, 2044, 2045, 3, 224, 112, 0, 2045, 223, 1, 0, 0, 0, 2046, 2054, + 5, 185, 0, 0, 2047, 2054, 3, 176, 88, 0, 2048, 2054, 5, 188, 0, 0, 2049, + 2050, 5, 3, 0, 0, 2050, 2051, 3, 224, 112, 0, 2051, 2052, 5, 4, 0, 0, 2052, + 2054, 1, 0, 0, 0, 2053, 2046, 1, 0, 0, 0, 2053, 2047, 1, 0, 0, 0, 2053, + 2048, 1, 0, 0, 0, 2053, 2049, 1, 0, 0, 0, 2054, 225, 1, 0, 0, 0, 296, 229, + 237, 244, 249, 255, 261, 263, 289, 296, 303, 309, 313, 318, 321, 328, 331, + 335, 343, 347, 349, 353, 357, 361, 364, 371, 377, 383, 388, 399, 405, 409, + 413, 416, 420, 426, 431, 440, 447, 453, 457, 461, 466, 472, 484, 488, 493, + 496, 499, 502, 506, 509, 523, 530, 537, 539, 542, 548, 553, 561, 566, 581, + 587, 597, 602, 612, 616, 618, 622, 627, 629, 637, 643, 648, 655, 666, 669, + 671, 678, 682, 689, 695, 701, 707, 712, 721, 726, 737, 742, 753, 758, 762, + 778, 788, 793, 801, 813, 818, 826, 833, 836, 839, 846, 849, 852, 855, 859, + 867, 872, 882, 887, 896, 903, 907, 911, 914, 922, 935, 938, 946, 955, 959, + 964, 994, 1006, 1011, 1023, 1029, 1036, 1040, 1050, 1053, 1059, 1065, 1074, + 1077, 1081, 1083, 1085, 1094, 1106, 1117, 1121, 1128, 1134, 1139, 1147, + 1152, 1156, 1159, 1163, 1166, 1174, 1185, 1191, 1193, 1201, 1208, 1215, + 1220, 1222, 1228, 1237, 1242, 1249, 1253, 1255, 1258, 1266, 1270, 1273, + 1279, 1283, 1288, 1295, 1304, 1308, 1310, 1314, 1323, 1328, 1330, 1343, + 1346, 1349, 1354, 1358, 1361, 1364, 1369, 1373, 1378, 1381, 1384, 1389, + 1393, 1396, 1403, 1408, 1417, 1422, 1425, 1433, 1437, 1445, 1448, 1450, + 1459, 1462, 1464, 1468, 1472, 1476, 1479, 1490, 1495, 1499, 1503, 1506, + 1511, 1517, 1524, 1531, 1540, 1544, 1546, 1550, 1553, 1561, 1567, 1572, + 1578, 1585, 1592, 1597, 1600, 1603, 1606, 1611, 1616, 1623, 1627, 1631, + 1641, 1650, 1653, 1662, 1666, 1674, 1683, 1686, 1695, 1698, 1701, 1704, + 1714, 1716, 1725, 1734, 1738, 1745, 1752, 1756, 1760, 1769, 1773, 1777, + 1782, 1786, 1793, 1803, 1810, 1815, 1818, 1822, 1836, 1848, 1857, 1866, + 1870, 1880, 1883, 1892, 1901, 1904, 1910, 1924, 1928, 1939, 1944, 1957, + 1964, 1972, 1977, 1981, 
1994, 2053, + } + deserializer := antlr.NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(state, index) + } +} + +// SQLiteParserInit initializes any static state used to implement SQLiteParser. By default the +// static state used to implement the parser is lazily initialized during the first call to +// NewSQLiteParser(). You can call this function if you wish to initialize the static state ahead +// of time. +func SQLiteParserInit() { + staticData := &sqliteparserParserStaticData + staticData.once.Do(sqliteparserParserInit) +} + +// NewSQLiteParser produces a new parser instance for the optional input antlr.TokenStream. +func NewSQLiteParser(input antlr.TokenStream) *SQLiteParser { + SQLiteParserInit() + this := new(SQLiteParser) + this.BaseParser = antlr.NewBaseParser(input) + staticData := &sqliteparserParserStaticData + this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache) + this.RuleNames = staticData.ruleNames + this.LiteralNames = staticData.literalNames + this.SymbolicNames = staticData.symbolicNames + this.GrammarFileName = "SQLiteParser.g4" + + return this +} + +// SQLiteParser tokens. +const ( + SQLiteParserEOF = antlr.TokenEOF + SQLiteParserSCOL = 1 + SQLiteParserDOT = 2 + SQLiteParserOPEN_PAR = 3 + SQLiteParserCLOSE_PAR = 4 + SQLiteParserCOMMA = 5 + SQLiteParserASSIGN = 6 + SQLiteParserSTAR = 7 + SQLiteParserPLUS = 8 + SQLiteParserMINUS = 9 + SQLiteParserTILDE = 10 + SQLiteParserPIPE2 = 11 + SQLiteParserDIV = 12 + SQLiteParserMOD = 13 + SQLiteParserLT2 = 14 + SQLiteParserGT2 = 15 + SQLiteParserAMP = 16 + SQLiteParserPIPE = 17 + SQLiteParserLT = 18 + SQLiteParserLT_EQ = 19 + SQLiteParserGT = 20 + SQLiteParserGT_EQ = 21 + SQLiteParserEQ = 22 + SQLiteParserNOT_EQ1 = 23 + SQLiteParserNOT_EQ2 = 24 + SQLiteParserABORT_ = 25 + SQLiteParserACTION_ = 26 + SQLiteParserADD_ = 27 + SQLiteParserAFTER_ = 28 + SQLiteParserALL_ = 29 + SQLiteParserALTER_ = 30 + SQLiteParserANALYZE_ = 31 + SQLiteParserAND_ = 32 + SQLiteParserAS_ = 33 + SQLiteParserASC_ = 34 + SQLiteParserATTACH_ = 35 + SQLiteParserAUTOINCREMENT_ = 36 + SQLiteParserBEFORE_ = 37 + SQLiteParserBEGIN_ = 38 + SQLiteParserBETWEEN_ = 39 + SQLiteParserBY_ = 40 + SQLiteParserCASCADE_ = 41 + SQLiteParserCASE_ = 42 + SQLiteParserCAST_ = 43 + SQLiteParserCHECK_ = 44 + SQLiteParserCOLLATE_ = 45 + SQLiteParserCOLUMN_ = 46 + SQLiteParserCOMMIT_ = 47 + SQLiteParserCONFLICT_ = 48 + SQLiteParserCONSTRAINT_ = 49 + SQLiteParserCREATE_ = 50 + SQLiteParserCROSS_ = 51 + SQLiteParserCURRENT_DATE_ = 52 + SQLiteParserCURRENT_TIME_ = 53 + SQLiteParserCURRENT_TIMESTAMP_ = 54 + SQLiteParserDATABASE_ = 55 + SQLiteParserDEFAULT_ = 56 + SQLiteParserDEFERRABLE_ = 57 + SQLiteParserDEFERRED_ = 58 + SQLiteParserDELETE_ = 59 + SQLiteParserDESC_ = 60 + SQLiteParserDETACH_ = 61 + SQLiteParserDISTINCT_ = 62 + SQLiteParserDROP_ = 63 + SQLiteParserEACH_ = 64 + SQLiteParserELSE_ = 65 + SQLiteParserEND_ = 66 + SQLiteParserESCAPE_ = 67 + SQLiteParserEXCEPT_ = 68 + SQLiteParserEXCLUSIVE_ = 69 + SQLiteParserEXISTS_ = 70 + SQLiteParserEXPLAIN_ = 71 + SQLiteParserFAIL_ = 72 + SQLiteParserFOR_ = 73 + SQLiteParserFOREIGN_ = 74 + SQLiteParserFROM_ = 75 + SQLiteParserFULL_ = 76 + SQLiteParserGLOB_ = 77 + SQLiteParserGROUP_ = 78 + 
SQLiteParserHAVING_ = 79 + SQLiteParserIF_ = 80 + SQLiteParserIGNORE_ = 81 + SQLiteParserIMMEDIATE_ = 82 + SQLiteParserIN_ = 83 + SQLiteParserINDEX_ = 84 + SQLiteParserINDEXED_ = 85 + SQLiteParserINITIALLY_ = 86 + SQLiteParserINNER_ = 87 + SQLiteParserINSERT_ = 88 + SQLiteParserINSTEAD_ = 89 + SQLiteParserINTERSECT_ = 90 + SQLiteParserINTO_ = 91 + SQLiteParserIS_ = 92 + SQLiteParserISNULL_ = 93 + SQLiteParserJOIN_ = 94 + SQLiteParserKEY_ = 95 + SQLiteParserLEFT_ = 96 + SQLiteParserLIKE_ = 97 + SQLiteParserLIMIT_ = 98 + SQLiteParserMATCH_ = 99 + SQLiteParserNATURAL_ = 100 + SQLiteParserNO_ = 101 + SQLiteParserNOT_ = 102 + SQLiteParserNOTNULL_ = 103 + SQLiteParserNULL_ = 104 + SQLiteParserOF_ = 105 + SQLiteParserOFFSET_ = 106 + SQLiteParserON_ = 107 + SQLiteParserOR_ = 108 + SQLiteParserORDER_ = 109 + SQLiteParserOUTER_ = 110 + SQLiteParserPLAN_ = 111 + SQLiteParserPRAGMA_ = 112 + SQLiteParserPRIMARY_ = 113 + SQLiteParserQUERY_ = 114 + SQLiteParserRAISE_ = 115 + SQLiteParserRECURSIVE_ = 116 + SQLiteParserREFERENCES_ = 117 + SQLiteParserREGEXP_ = 118 + SQLiteParserREINDEX_ = 119 + SQLiteParserRELEASE_ = 120 + SQLiteParserRENAME_ = 121 + SQLiteParserREPLACE_ = 122 + SQLiteParserRESTRICT_ = 123 + SQLiteParserRETURNING_ = 124 + SQLiteParserRIGHT_ = 125 + SQLiteParserROLLBACK_ = 126 + SQLiteParserROW_ = 127 + SQLiteParserROWS_ = 128 + SQLiteParserSAVEPOINT_ = 129 + SQLiteParserSELECT_ = 130 + SQLiteParserSET_ = 131 + SQLiteParserTABLE_ = 132 + SQLiteParserTEMP_ = 133 + SQLiteParserTEMPORARY_ = 134 + SQLiteParserTHEN_ = 135 + SQLiteParserTO_ = 136 + SQLiteParserTRANSACTION_ = 137 + SQLiteParserTRIGGER_ = 138 + SQLiteParserUNION_ = 139 + SQLiteParserUNIQUE_ = 140 + SQLiteParserUPDATE_ = 141 + SQLiteParserUSING_ = 142 + SQLiteParserVACUUM_ = 143 + SQLiteParserVALUES_ = 144 + SQLiteParserVIEW_ = 145 + SQLiteParserVIRTUAL_ = 146 + SQLiteParserWHEN_ = 147 + SQLiteParserWHERE_ = 148 + SQLiteParserWITH_ = 149 + SQLiteParserWITHOUT_ = 150 + SQLiteParserFIRST_VALUE_ = 151 + SQLiteParserOVER_ = 152 + SQLiteParserPARTITION_ = 153 + SQLiteParserRANGE_ = 154 + SQLiteParserPRECEDING_ = 155 + SQLiteParserUNBOUNDED_ = 156 + SQLiteParserCURRENT_ = 157 + SQLiteParserFOLLOWING_ = 158 + SQLiteParserCUME_DIST_ = 159 + SQLiteParserDENSE_RANK_ = 160 + SQLiteParserLAG_ = 161 + SQLiteParserLAST_VALUE_ = 162 + SQLiteParserLEAD_ = 163 + SQLiteParserNTH_VALUE_ = 164 + SQLiteParserNTILE_ = 165 + SQLiteParserPERCENT_RANK_ = 166 + SQLiteParserRANK_ = 167 + SQLiteParserROW_NUMBER_ = 168 + SQLiteParserGENERATED_ = 169 + SQLiteParserALWAYS_ = 170 + SQLiteParserSTORED_ = 171 + SQLiteParserTRUE_ = 172 + SQLiteParserFALSE_ = 173 + SQLiteParserWINDOW_ = 174 + SQLiteParserNULLS_ = 175 + SQLiteParserFIRST_ = 176 + SQLiteParserLAST_ = 177 + SQLiteParserFILTER_ = 178 + SQLiteParserGROUPS_ = 179 + SQLiteParserEXCLUDE_ = 180 + SQLiteParserTIES_ = 181 + SQLiteParserOTHERS_ = 182 + SQLiteParserDO_ = 183 + SQLiteParserNOTHING_ = 184 + SQLiteParserIDENTIFIER = 185 + SQLiteParserNUMERIC_LITERAL = 186 + SQLiteParserBIND_PARAMETER = 187 + SQLiteParserSTRING_LITERAL = 188 + SQLiteParserBLOB_LITERAL = 189 + SQLiteParserSINGLE_LINE_COMMENT = 190 + SQLiteParserMULTILINE_COMMENT = 191 + SQLiteParserSPACES = 192 + SQLiteParserUNEXPECTED_CHAR = 193 +) + +// SQLiteParser rules. 
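+
+// Example usage (an illustrative sketch, not part of the ANTLR-generated
+// output): a parser is normally driven from an input string through the
+// companion generated lexer. The NewSQLiteLexer constructor name below is
+// assumed from ANTLR's usual naming convention for this grammar; check the
+// generated lexer file if it differs.
+//
+//	input := antlr.NewInputStream("SELECT id, name FROM users;")
+//	lexer := NewSQLiteLexer(input)
+//	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
+//	parser := NewSQLiteParser(tokens)
+//	tree := parser.Parse() // root rule; walk the result with a SQLiteParserListener
+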
+const ( + SQLiteParserRULE_parse = 0 + SQLiteParserRULE_sql_stmt_list = 1 + SQLiteParserRULE_sql_stmt = 2 + SQLiteParserRULE_alter_table_stmt = 3 + SQLiteParserRULE_analyze_stmt = 4 + SQLiteParserRULE_attach_stmt = 5 + SQLiteParserRULE_begin_stmt = 6 + SQLiteParserRULE_commit_stmt = 7 + SQLiteParserRULE_rollback_stmt = 8 + SQLiteParserRULE_savepoint_stmt = 9 + SQLiteParserRULE_release_stmt = 10 + SQLiteParserRULE_create_index_stmt = 11 + SQLiteParserRULE_indexed_column = 12 + SQLiteParserRULE_create_table_stmt = 13 + SQLiteParserRULE_column_def = 14 + SQLiteParserRULE_type_name = 15 + SQLiteParserRULE_column_constraint = 16 + SQLiteParserRULE_signed_number = 17 + SQLiteParserRULE_table_constraint = 18 + SQLiteParserRULE_foreign_key_clause = 19 + SQLiteParserRULE_conflict_clause = 20 + SQLiteParserRULE_create_trigger_stmt = 21 + SQLiteParserRULE_create_view_stmt = 22 + SQLiteParserRULE_create_virtual_table_stmt = 23 + SQLiteParserRULE_with_clause = 24 + SQLiteParserRULE_cte_table_name = 25 + SQLiteParserRULE_recursive_cte = 26 + SQLiteParserRULE_common_table_expression = 27 + SQLiteParserRULE_delete_stmt = 28 + SQLiteParserRULE_delete_stmt_limited = 29 + SQLiteParserRULE_detach_stmt = 30 + SQLiteParserRULE_drop_stmt = 31 + SQLiteParserRULE_expr = 32 + SQLiteParserRULE_raise_function = 33 + SQLiteParserRULE_literal_value = 34 + SQLiteParserRULE_value_row = 35 + SQLiteParserRULE_values_clause = 36 + SQLiteParserRULE_insert_stmt = 37 + SQLiteParserRULE_returning_clause = 38 + SQLiteParserRULE_upsert_clause = 39 + SQLiteParserRULE_pragma_stmt = 40 + SQLiteParserRULE_pragma_value = 41 + SQLiteParserRULE_reindex_stmt = 42 + SQLiteParserRULE_select_stmt = 43 + SQLiteParserRULE_join_clause = 44 + SQLiteParserRULE_select_core = 45 + SQLiteParserRULE_factored_select_stmt = 46 + SQLiteParserRULE_simple_select_stmt = 47 + SQLiteParserRULE_compound_select_stmt = 48 + SQLiteParserRULE_table_or_subquery = 49 + SQLiteParserRULE_result_column = 50 + SQLiteParserRULE_join_operator = 51 + SQLiteParserRULE_join_constraint = 52 + SQLiteParserRULE_compound_operator = 53 + SQLiteParserRULE_update_stmt = 54 + SQLiteParserRULE_column_name_list = 55 + SQLiteParserRULE_update_stmt_limited = 56 + SQLiteParserRULE_qualified_table_name = 57 + SQLiteParserRULE_vacuum_stmt = 58 + SQLiteParserRULE_filter_clause = 59 + SQLiteParserRULE_window_defn = 60 + SQLiteParserRULE_over_clause = 61 + SQLiteParserRULE_frame_spec = 62 + SQLiteParserRULE_frame_clause = 63 + SQLiteParserRULE_simple_function_invocation = 64 + SQLiteParserRULE_aggregate_function_invocation = 65 + SQLiteParserRULE_window_function_invocation = 66 + SQLiteParserRULE_common_table_stmt = 67 + SQLiteParserRULE_order_by_stmt = 68 + SQLiteParserRULE_limit_stmt = 69 + SQLiteParserRULE_ordering_term = 70 + SQLiteParserRULE_asc_desc = 71 + SQLiteParserRULE_frame_left = 72 + SQLiteParserRULE_frame_right = 73 + SQLiteParserRULE_frame_single = 74 + SQLiteParserRULE_window_function = 75 + SQLiteParserRULE_offset = 76 + SQLiteParserRULE_default_value = 77 + SQLiteParserRULE_partition_by = 78 + SQLiteParserRULE_order_by_expr = 79 + SQLiteParserRULE_order_by_expr_asc_desc = 80 + SQLiteParserRULE_expr_asc_desc = 81 + SQLiteParserRULE_initial_select = 82 + SQLiteParserRULE_recursive_select = 83 + SQLiteParserRULE_unary_operator = 84 + SQLiteParserRULE_error_message = 85 + SQLiteParserRULE_module_argument = 86 + SQLiteParserRULE_column_alias = 87 + SQLiteParserRULE_keyword = 88 + SQLiteParserRULE_name = 89 + SQLiteParserRULE_function_name = 90 + SQLiteParserRULE_schema_name = 
91 + SQLiteParserRULE_table_name = 92 + SQLiteParserRULE_table_or_index_name = 93 + SQLiteParserRULE_column_name = 94 + SQLiteParserRULE_collation_name = 95 + SQLiteParserRULE_foreign_table = 96 + SQLiteParserRULE_index_name = 97 + SQLiteParserRULE_trigger_name = 98 + SQLiteParserRULE_view_name = 99 + SQLiteParserRULE_module_name = 100 + SQLiteParserRULE_pragma_name = 101 + SQLiteParserRULE_savepoint_name = 102 + SQLiteParserRULE_table_alias = 103 + SQLiteParserRULE_transaction_name = 104 + SQLiteParserRULE_window_name = 105 + SQLiteParserRULE_alias = 106 + SQLiteParserRULE_filename = 107 + SQLiteParserRULE_base_window_name = 108 + SQLiteParserRULE_simple_func = 109 + SQLiteParserRULE_aggregate_func = 110 + SQLiteParserRULE_table_function_name = 111 + SQLiteParserRULE_any_name = 112 +) + +// IParseContext is an interface to support dynamic dispatch. +type IParseContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + EOF() antlr.TerminalNode + AllSql_stmt_list() []ISql_stmt_listContext + Sql_stmt_list(i int) ISql_stmt_listContext + + // IsParseContext differentiates from other interfaces. + IsParseContext() +} + +type ParseContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyParseContext() *ParseContext { + var p = new(ParseContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_parse + return p +} + +func (*ParseContext) IsParseContext() {} + +func NewParseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ParseContext { + var p = new(ParseContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_parse + + return p +} + +func (s *ParseContext) GetParser() antlr.Parser { return s.parser } + +func (s *ParseContext) EOF() antlr.TerminalNode { + return s.GetToken(SQLiteParserEOF, 0) +} + +func (s *ParseContext) AllSql_stmt_list() []ISql_stmt_listContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ISql_stmt_listContext); ok { + len++ + } + } + + tst := make([]ISql_stmt_listContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ISql_stmt_listContext); ok { + tst[i] = t.(ISql_stmt_listContext) + i++ + } + } + + return tst +} + +func (s *ParseContext) Sql_stmt_list(i int) ISql_stmt_listContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISql_stmt_listContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ISql_stmt_listContext) +} + +func (s *ParseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ParseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *ParseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterParse(s) + } +} + +func (s *ParseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitParse(s) + } +} + +func (p *SQLiteParser) Parse() (localctx IParseContext) { + this := p + _ = this + + localctx = NewParseContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 0, SQLiteParserRULE_parse) + var _la int + + defer func() { + 
p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(229) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for ((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&-6339801325483589630) != 0) || ((int64((_la-66)) & ^0x3f) == 0 && ((int64(1)<<(_la-66))&-7971300971697405919) != 0) || ((int64((_la-130)) & ^0x3f) == 0 && ((int64(1)<<(_la-130))&550913) != 0) { + { + p.SetState(226) + p.Sql_stmt_list() + } + + p.SetState(231) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(232) + p.Match(SQLiteParserEOF) + } + + return localctx +} + +// ISql_stmt_listContext is an interface to support dynamic dispatch. +type ISql_stmt_listContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + AllSql_stmt() []ISql_stmtContext + Sql_stmt(i int) ISql_stmtContext + AllSCOL() []antlr.TerminalNode + SCOL(i int) antlr.TerminalNode + + // IsSql_stmt_listContext differentiates from other interfaces. + IsSql_stmt_listContext() +} + +type Sql_stmt_listContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptySql_stmt_listContext() *Sql_stmt_listContext { + var p = new(Sql_stmt_listContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_sql_stmt_list + return p +} + +func (*Sql_stmt_listContext) IsSql_stmt_listContext() {} + +func NewSql_stmt_listContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Sql_stmt_listContext { + var p = new(Sql_stmt_listContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_sql_stmt_list + + return p +} + +func (s *Sql_stmt_listContext) GetParser() antlr.Parser { return s.parser } + +func (s *Sql_stmt_listContext) AllSql_stmt() []ISql_stmtContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ISql_stmtContext); ok { + len++ + } + } + + tst := make([]ISql_stmtContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ISql_stmtContext); ok { + tst[i] = t.(ISql_stmtContext) + i++ + } + } + + return tst +} + +func (s *Sql_stmt_listContext) Sql_stmt(i int) ISql_stmtContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISql_stmtContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ISql_stmtContext) +} + +func (s *Sql_stmt_listContext) AllSCOL() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserSCOL) +} + +func (s *Sql_stmt_listContext) SCOL(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserSCOL, i) +} + +func (s *Sql_stmt_listContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Sql_stmt_listContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Sql_stmt_listContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterSql_stmt_list(s) + } +} + +func (s *Sql_stmt_listContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok 
:= listener.(SQLiteParserListener); ok { + listenerT.ExitSql_stmt_list(s) + } +} + +func (p *SQLiteParser) Sql_stmt_list() (localctx ISql_stmt_listContext) { + this := p + _ = this + + localctx = NewSql_stmt_listContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 2, SQLiteParserRULE_sql_stmt_list) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + var _alt int + + p.EnterOuterAlt(localctx, 1) + p.SetState(237) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserSCOL { + { + p.SetState(234) + p.Match(SQLiteParserSCOL) + } + + p.SetState(239) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(240) + p.Sql_stmt() + } + p.SetState(249) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) + + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + p.SetState(242) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for ok := true; ok; ok = _la == SQLiteParserSCOL { + { + p.SetState(241) + p.Match(SQLiteParserSCOL) + } + + p.SetState(244) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(246) + p.Sql_stmt() + } + + } + p.SetState(251) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) + } + p.SetState(255) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 4, p.GetParserRuleContext()) + + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + { + p.SetState(252) + p.Match(SQLiteParserSCOL) + } + + } + p.SetState(257) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 4, p.GetParserRuleContext()) + } + + return localctx +} + +// ISql_stmtContext is an interface to support dynamic dispatch. +type ISql_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Alter_table_stmt() IAlter_table_stmtContext + Analyze_stmt() IAnalyze_stmtContext + Attach_stmt() IAttach_stmtContext + Begin_stmt() IBegin_stmtContext + Commit_stmt() ICommit_stmtContext + Create_index_stmt() ICreate_index_stmtContext + Create_table_stmt() ICreate_table_stmtContext + Create_trigger_stmt() ICreate_trigger_stmtContext + Create_view_stmt() ICreate_view_stmtContext + Create_virtual_table_stmt() ICreate_virtual_table_stmtContext + Delete_stmt() IDelete_stmtContext + Delete_stmt_limited() IDelete_stmt_limitedContext + Detach_stmt() IDetach_stmtContext + Drop_stmt() IDrop_stmtContext + Insert_stmt() IInsert_stmtContext + Pragma_stmt() IPragma_stmtContext + Reindex_stmt() IReindex_stmtContext + Release_stmt() IRelease_stmtContext + Rollback_stmt() IRollback_stmtContext + Savepoint_stmt() ISavepoint_stmtContext + Select_stmt() ISelect_stmtContext + Update_stmt() IUpdate_stmtContext + Update_stmt_limited() IUpdate_stmt_limitedContext + Vacuum_stmt() IVacuum_stmtContext + EXPLAIN_() antlr.TerminalNode + QUERY_() antlr.TerminalNode + PLAN_() antlr.TerminalNode + + // IsSql_stmtContext differentiates from other interfaces. 
+
+// ISql_stmtContext is an interface to support dynamic dispatch.
+type ISql_stmtContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Alter_table_stmt() IAlter_table_stmtContext
+	Analyze_stmt() IAnalyze_stmtContext
+	Attach_stmt() IAttach_stmtContext
+	Begin_stmt() IBegin_stmtContext
+	Commit_stmt() ICommit_stmtContext
+	Create_index_stmt() ICreate_index_stmtContext
+	Create_table_stmt() ICreate_table_stmtContext
+	Create_trigger_stmt() ICreate_trigger_stmtContext
+	Create_view_stmt() ICreate_view_stmtContext
+	Create_virtual_table_stmt() ICreate_virtual_table_stmtContext
+	Delete_stmt() IDelete_stmtContext
+	Delete_stmt_limited() IDelete_stmt_limitedContext
+	Detach_stmt() IDetach_stmtContext
+	Drop_stmt() IDrop_stmtContext
+	Insert_stmt() IInsert_stmtContext
+	Pragma_stmt() IPragma_stmtContext
+	Reindex_stmt() IReindex_stmtContext
+	Release_stmt() IRelease_stmtContext
+	Rollback_stmt() IRollback_stmtContext
+	Savepoint_stmt() ISavepoint_stmtContext
+	Select_stmt() ISelect_stmtContext
+	Update_stmt() IUpdate_stmtContext
+	Update_stmt_limited() IUpdate_stmt_limitedContext
+	Vacuum_stmt() IVacuum_stmtContext
+	EXPLAIN_() antlr.TerminalNode
+	QUERY_() antlr.TerminalNode
+	PLAN_() antlr.TerminalNode
+
+	// IsSql_stmtContext differentiates from other interfaces.
+	IsSql_stmtContext()
+}
+
+type Sql_stmtContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptySql_stmtContext() *Sql_stmtContext {
+	var p = new(Sql_stmtContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_sql_stmt
+	return p
+}
+
+func (*Sql_stmtContext) IsSql_stmtContext() {}
+
+func NewSql_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Sql_stmtContext {
+	var p = new(Sql_stmtContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_sql_stmt
+
+	return p
+}
+
+func (s *Sql_stmtContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Sql_stmtContext) Alter_table_stmt() IAlter_table_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAlter_table_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAlter_table_stmtContext)
+}
+
+func (s *Sql_stmtContext) Analyze_stmt() IAnalyze_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAnalyze_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAnalyze_stmtContext)
+}
+
+func (s *Sql_stmtContext) Attach_stmt() IAttach_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAttach_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAttach_stmtContext)
+}
+
+func (s *Sql_stmtContext) Begin_stmt() IBegin_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IBegin_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IBegin_stmtContext)
+}
+
+func (s *Sql_stmtContext) Commit_stmt() ICommit_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ICommit_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ICommit_stmtContext)
+}
+
+func (s *Sql_stmtContext) Create_index_stmt() ICreate_index_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ICreate_index_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ICreate_index_stmtContext)
+}
+
+func (s *Sql_stmtContext) Create_table_stmt() ICreate_table_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ICreate_table_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ICreate_table_stmtContext)
+}
+
+func (s *Sql_stmtContext) Create_trigger_stmt() ICreate_trigger_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ICreate_trigger_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ICreate_trigger_stmtContext)
+}
+
+func (s *Sql_stmtContext) Create_view_stmt() ICreate_view_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ICreate_view_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ICreate_view_stmtContext)
+}
+
+func (s *Sql_stmtContext) Create_virtual_table_stmt() ICreate_virtual_table_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ICreate_virtual_table_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ICreate_virtual_table_stmtContext)
+}
+
+func (s *Sql_stmtContext) Delete_stmt() IDelete_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IDelete_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IDelete_stmtContext)
+}
+
+func (s *Sql_stmtContext) Delete_stmt_limited() IDelete_stmt_limitedContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IDelete_stmt_limitedContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IDelete_stmt_limitedContext)
+}
+
+func (s *Sql_stmtContext) Detach_stmt() IDetach_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IDetach_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IDetach_stmtContext)
+}
+
+func (s *Sql_stmtContext) Drop_stmt() IDrop_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IDrop_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IDrop_stmtContext)
+}
+
+func (s *Sql_stmtContext) Insert_stmt() IInsert_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IInsert_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IInsert_stmtContext)
+}
+
+func (s *Sql_stmtContext) Pragma_stmt() IPragma_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IPragma_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IPragma_stmtContext)
+}
+
+func (s *Sql_stmtContext) Reindex_stmt() IReindex_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IReindex_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IReindex_stmtContext)
+}
+
+func (s *Sql_stmtContext) Release_stmt() IRelease_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IRelease_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IRelease_stmtContext)
+}
+
+func (s *Sql_stmtContext) Rollback_stmt() IRollback_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IRollback_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IRollback_stmtContext)
+}
+
+func (s *Sql_stmtContext) Savepoint_stmt() ISavepoint_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ISavepoint_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ISavepoint_stmtContext)
+}
+
+func (s *Sql_stmtContext) Select_stmt() ISelect_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ISelect_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ISelect_stmtContext)
+}
+
+func (s *Sql_stmtContext) Update_stmt() IUpdate_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IUpdate_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IUpdate_stmtContext)
+}
+
+func (s *Sql_stmtContext) Update_stmt_limited() IUpdate_stmt_limitedContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IUpdate_stmt_limitedContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IUpdate_stmt_limitedContext)
+}
+
+func (s *Sql_stmtContext) Vacuum_stmt() IVacuum_stmtContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IVacuum_stmtContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IVacuum_stmtContext)
+}
+
+func (s *Sql_stmtContext) EXPLAIN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserEXPLAIN_, 0)
+}
+
+func (s *Sql_stmtContext) QUERY_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserQUERY_, 0)
+}
+
+func (s *Sql_stmtContext) PLAN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserPLAN_, 0)
+}
+
+func (s *Sql_stmtContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Sql_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Sql_stmtContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterSql_stmt(s)
+	}
+}
+
+func (s *Sql_stmtContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitSql_stmt(s)
+	}
+}
+
+func (p *SQLiteParser) Sql_stmt() (localctx ISql_stmtContext) {
+	this := p
+	_ = this
+
+	localctx = NewSql_stmtContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 4, SQLiteParserRULE_sql_stmt)
+	var _la int
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	p.SetState(263)
+	p.GetErrorHandler().Sync(p)
+	_la = p.GetTokenStream().LA(1)
+
+	if _la == SQLiteParserEXPLAIN_ {
+		{
+			p.SetState(258)
+			p.Match(SQLiteParserEXPLAIN_)
+		}
+		p.SetState(261)
+		p.GetErrorHandler().Sync(p)
+		_la = p.GetTokenStream().LA(1)
+
+		if _la == SQLiteParserQUERY_ {
+			{
+				p.SetState(259)
+				p.Match(SQLiteParserQUERY_)
+			}
+			{
+				p.SetState(260)
+				p.Match(SQLiteParserPLAN_)
+			}
+
+		}
+
+	}
+	p.SetState(289)
+	p.GetErrorHandler().Sync(p)
+	switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 7, p.GetParserRuleContext()) {
+	case 1:
+		{
+			p.SetState(265)
+			p.Alter_table_stmt()
+		}
+
+	case 2:
+		{
+			p.SetState(266)
+			p.Analyze_stmt()
+		}
+
+	case 3:
+		{
+			p.SetState(267)
+			p.Attach_stmt()
+		}
+
+	case 4:
+		{
+			p.SetState(268)
+			p.Begin_stmt()
+		}
+
+	case 5:
+		{
+			p.SetState(269)
+			p.Commit_stmt()
+		}
+
+	case 6:
+		{
+			p.SetState(270)
+			p.Create_index_stmt()
+		}
+
+	case 7:
+		{
+			p.SetState(271)
+			p.Create_table_stmt()
+		}
+
+	case 8:
+		{
+			p.SetState(272)
+			p.Create_trigger_stmt()
+		}
+
+	case 9:
+		{
+			p.SetState(273)
+			p.Create_view_stmt()
+		}
+
+	case 10:
+		{
+			p.SetState(274)
+			p.Create_virtual_table_stmt()
+		}
+
+	case 11:
+		{
+			p.SetState(275)
+			p.Delete_stmt()
+		}
+
+	case 12:
+		{
+			p.SetState(276)
+			p.Delete_stmt_limited()
+		}
+
+	case 13:
+		{
+			p.SetState(277)
+			p.Detach_stmt()
+		}
+
+	case 14:
+		{
+			p.SetState(278)
+			p.Drop_stmt()
+		}
+
+	case 15:
+		{
+			p.SetState(279)
+			p.Insert_stmt()
+		}
+
+	case 16:
+		{
+			p.SetState(280)
+			p.Pragma_stmt()
+		}
+
+	case 17:
+		{
+			p.SetState(281)
+			p.Reindex_stmt()
+		}
+
+	case 18:
+		{
+			p.SetState(282)
+			p.Release_stmt()
+		}
+
+	case 19:
+		{
+			p.SetState(283)
+			p.Rollback_stmt()
+		}
+
+	case 20:
+		{
+			p.SetState(284)
+			p.Savepoint_stmt()
+		}
+
+	case 21:
+		{
+			p.SetState(285)
+			p.Select_stmt()
+		}
+
+	case 22:
+		{
+			p.SetState(286)
+			p.Update_stmt()
+		}
+
+	case 23:
+		{
+			p.SetState(287)
+			p.Update_stmt_limited()
+		}
+
+	case 24:
+		{
+			p.SetState(288)
+			p.Vacuum_stmt()
+		}
+
+	}
+
+	return localctx
+}
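The rule above first consumes an optional `EXPLAIN` or `EXPLAIN QUERY PLAN` prefix, then uses `AdaptivePredict` to choose among the 24 statement alternatives. From a listener, the prefix can be detected through the terminal-node getters generated earlier. A minimal sketch, reusing the imports and assumed package name from the driver sketch above:

```go
// explainSpotter flags statements that carry an EXPLAIN prefix, using the
// EXPLAIN_/QUERY_/PLAN_ terminal getters on Sql_stmtContext (nil when absent).
type explainSpotter struct {
	sqliteparser.BaseSQLiteParserListener // assumed base-listener name
}

func (e *explainSpotter) EnterSql_stmt(ctx *sqliteparser.Sql_stmtContext) {
	if ctx.EXPLAIN_() == nil {
		return
	}
	if ctx.QUERY_() != nil && ctx.PLAN_() != nil {
		fmt.Println("EXPLAIN QUERY PLAN:", ctx.GetText())
	} else {
		fmt.Println("EXPLAIN:", ctx.GetText())
	}
}
```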
+
+// IAlter_table_stmtContext is an interface to support dynamic dispatch.
+type IAlter_table_stmtContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// GetNew_table_name returns the new_table_name rule contexts.
+	GetNew_table_name() ITable_nameContext
+
+	// GetOld_column_name returns the old_column_name rule contexts.
+	GetOld_column_name() IColumn_nameContext
+
+	// GetNew_column_name returns the new_column_name rule contexts.
+	GetNew_column_name() IColumn_nameContext
+
+	// SetNew_table_name sets the new_table_name rule contexts.
+	SetNew_table_name(ITable_nameContext)
+
+	// SetOld_column_name sets the old_column_name rule contexts.
+	SetOld_column_name(IColumn_nameContext)
+
+	// SetNew_column_name sets the new_column_name rule contexts.
+	SetNew_column_name(IColumn_nameContext)
+
+	// Getter signatures
+	ALTER_() antlr.TerminalNode
+	TABLE_() antlr.TerminalNode
+	AllTable_name() []ITable_nameContext
+	Table_name(i int) ITable_nameContext
+	RENAME_() antlr.TerminalNode
+	ADD_() antlr.TerminalNode
+	Column_def() IColumn_defContext
+	DROP_() antlr.TerminalNode
+	AllColumn_name() []IColumn_nameContext
+	Column_name(i int) IColumn_nameContext
+	Schema_name() ISchema_nameContext
+	DOT() antlr.TerminalNode
+	TO_() antlr.TerminalNode
+	COLUMN_() antlr.TerminalNode
+
+	// IsAlter_table_stmtContext differentiates from other interfaces.
+	IsAlter_table_stmtContext()
+}
+
+type Alter_table_stmtContext struct {
+	*antlr.BaseParserRuleContext
+	parser          antlr.Parser
+	new_table_name  ITable_nameContext
+	old_column_name IColumn_nameContext
+	new_column_name IColumn_nameContext
+}
+
+func NewEmptyAlter_table_stmtContext() *Alter_table_stmtContext {
+	var p = new(Alter_table_stmtContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_alter_table_stmt
+	return p
+}
+
+func (*Alter_table_stmtContext) IsAlter_table_stmtContext() {}
+
+func NewAlter_table_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Alter_table_stmtContext {
+	var p = new(Alter_table_stmtContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_alter_table_stmt
+
+	return p
+}
+
+func (s *Alter_table_stmtContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Alter_table_stmtContext) GetNew_table_name() ITable_nameContext { return s.new_table_name }
+
+func (s *Alter_table_stmtContext) GetOld_column_name() IColumn_nameContext { return s.old_column_name }
+
+func (s *Alter_table_stmtContext) GetNew_column_name() IColumn_nameContext { return s.new_column_name }
+
+func (s *Alter_table_stmtContext) SetNew_table_name(v ITable_nameContext) { s.new_table_name = v }
+
+func (s *Alter_table_stmtContext) SetOld_column_name(v IColumn_nameContext) { s.old_column_name = v }
+
+func (s *Alter_table_stmtContext) SetNew_column_name(v IColumn_nameContext) { s.new_column_name = v }
+
+func (s *Alter_table_stmtContext) ALTER_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserALTER_, 0)
+}
+
+func (s *Alter_table_stmtContext) TABLE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTABLE_, 0)
+}
+
+func (s *Alter_table_stmtContext) AllTable_name() []ITable_nameContext {
+	children := s.GetChildren()
+	len := 0
+	for _, ctx := range children {
+		if _, ok := ctx.(ITable_nameContext); ok {
+			len++
+		}
+	}
+
+	tst := make([]ITable_nameContext, len)
+	i := 0
+	for _, ctx := range children {
+		if t, ok := ctx.(ITable_nameContext); ok {
+			tst[i] = t.(ITable_nameContext)
+			i++
+		}
+	}
+
+	return tst
+}
+
+func (s *Alter_table_stmtContext) Table_name(i int) ITable_nameContext {
+	var t antlr.RuleContext
+	j := 0
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ITable_nameContext); ok {
+			if j == i {
+				t = ctx.(antlr.RuleContext)
+				break
+			}
+			j++
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ITable_nameContext)
+}
+
+func (s *Alter_table_stmtContext) RENAME_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserRENAME_, 0)
+}
+
+func (s *Alter_table_stmtContext) ADD_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserADD_, 0)
+}
+
+func (s *Alter_table_stmtContext) Column_def() IColumn_defContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IColumn_defContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IColumn_defContext)
+}
+
+func (s *Alter_table_stmtContext) DROP_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDROP_, 0)
+}
+
+func (s *Alter_table_stmtContext) AllColumn_name() []IColumn_nameContext {
+	children := s.GetChildren()
+	len := 0
+	for _, ctx := range children {
+		if _, ok := ctx.(IColumn_nameContext); ok {
+			len++
+		}
+	}
+
+	tst := make([]IColumn_nameContext, len)
+	i := 0
+	for _, ctx := range children {
+		if t, ok := ctx.(IColumn_nameContext); ok {
+			tst[i] = t.(IColumn_nameContext)
+			i++
+		}
+	}
+
+	return tst
+}
+
+func (s *Alter_table_stmtContext) Column_name(i int) IColumn_nameContext {
+	var t antlr.RuleContext
+	j := 0
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IColumn_nameContext); ok {
+			if j == i {
+				t = ctx.(antlr.RuleContext)
+				break
+			}
+			j++
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IColumn_nameContext)
+}
+
+func (s *Alter_table_stmtContext) Schema_name() ISchema_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ISchema_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ISchema_nameContext)
+}
+
+func (s *Alter_table_stmtContext) DOT() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDOT, 0)
+}
+
+func (s *Alter_table_stmtContext) TO_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTO_, 0)
+}
+
+func (s *Alter_table_stmtContext) COLUMN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCOLUMN_, 0)
+}
+
+func (s *Alter_table_stmtContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Alter_table_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Alter_table_stmtContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterAlter_table_stmt(s)
+	}
+}
+
+func (s *Alter_table_stmtContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitAlter_table_stmt(s)
+	}
+}
+
+func (p *SQLiteParser) Alter_table_stmt() (localctx IAlter_table_stmtContext) {
+	this := p
+	_ = this
+
+	localctx = NewAlter_table_stmtContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 6, SQLiteParserRULE_alter_table_stmt)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(291)
+		p.Match(SQLiteParserALTER_)
+	}
+	{
+		p.SetState(292)
+		p.Match(SQLiteParserTABLE_)
+	}
+	p.SetState(296)
+	p.GetErrorHandler().Sync(p)
+
+	if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 8, p.GetParserRuleContext()) == 1 {
+		{
+			p.SetState(293)
+			p.Schema_name()
+		}
+		{
+			p.SetState(294)
+			p.Match(SQLiteParserDOT)
+		}
+
+	}
+	{
+		p.SetState(298)
+		p.Table_name()
+	}
+	p.SetState(321)
+	p.GetErrorHandler().Sync(p)
+
+	switch p.GetTokenStream().LA(1) {
+	case SQLiteParserRENAME_:
+		{
+			p.SetState(299)
+			p.Match(SQLiteParserRENAME_)
+		}
+		p.SetState(309)
+		p.GetErrorHandler().Sync(p)
+		switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 10, p.GetParserRuleContext()) {
+		case 1:
+			{
+				p.SetState(300)
+				p.Match(SQLiteParserTO_)
+			}
+			{
+				p.SetState(301)
+
+				var _x = p.Table_name()
+
+				localctx.(*Alter_table_stmtContext).new_table_name = _x
+			}
+
+		case 2:
+			p.SetState(303)
+			p.GetErrorHandler().Sync(p)
+
+			if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 9, p.GetParserRuleContext()) == 1 {
+				{
+					p.SetState(302)
+					p.Match(SQLiteParserCOLUMN_)
+				}
+
+			}
+			{
+				p.SetState(305)
+
+				var _x = p.Column_name()
+
+				localctx.(*Alter_table_stmtContext).old_column_name = _x
+			}
+			{
+				p.SetState(306)
+				p.Match(SQLiteParserTO_)
+			}
+			{
+				p.SetState(307)
+
+				var _x = p.Column_name()
+
+				localctx.(*Alter_table_stmtContext).new_column_name = _x
+			}
+
+		}
+
+	case SQLiteParserADD_:
+		{
+			p.SetState(311)
+			p.Match(SQLiteParserADD_)
+		}
+		p.SetState(313)
+		p.GetErrorHandler().Sync(p)
+
+		if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 11, p.GetParserRuleContext()) == 1 {
+			{
+				p.SetState(312)
+				p.Match(SQLiteParserCOLUMN_)
+			}
+
+		}
+		{
+			p.SetState(315)
+			p.Column_def()
+		}
+
+	case SQLiteParserDROP_:
+		{
+			p.SetState(316)
+			p.Match(SQLiteParserDROP_)
+		}
+		p.SetState(318)
+		p.GetErrorHandler().Sync(p)
+
+		if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 12, p.GetParserRuleContext()) == 1 {
+			{
+				p.SetState(317)
+				p.Match(SQLiteParserCOLUMN_)
+			}
+
+		}
+		{
+			p.SetState(320)
+			p.Column_name()
+		}
+
+	default:
+		panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil))
+	}
+
+	return localctx
+}
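Note how the RENAME branches store their results into the labeled fields (`new_table_name`, `old_column_name`, `new_column_name`) exposed through the `Get…` accessors above. A listener can use those accessors to tell a table rename from a column rename; a minimal sketch under the same assumed package name as the earlier examples:

```go
// alterAuditor reports what an ALTER TABLE statement does, using the labeled
// sub-contexts populated by Alter_table_stmt (each is nil when not matched).
type alterAuditor struct {
	sqliteparser.BaseSQLiteParserListener // assumed base-listener name
}

func (a *alterAuditor) EnterAlter_table_stmt(ctx *sqliteparser.Alter_table_stmtContext) {
	switch {
	case ctx.GetNew_table_name() != nil:
		fmt.Println("table renamed to", ctx.GetNew_table_name().GetText())
	case ctx.GetNew_column_name() != nil:
		fmt.Printf("column %s renamed to %s\n",
			ctx.GetOld_column_name().GetText(), ctx.GetNew_column_name().GetText())
	case ctx.Column_def() != nil:
		fmt.Println("column added:", ctx.Column_def().GetText())
	default:
		fmt.Println("column dropped:", ctx.Column_name(0).GetText())
	}
}
```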
+
+// IAnalyze_stmtContext is an interface to support dynamic dispatch.
+type IAnalyze_stmtContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	ANALYZE_() antlr.TerminalNode
+	Schema_name() ISchema_nameContext
+	Table_or_index_name() ITable_or_index_nameContext
+	DOT() antlr.TerminalNode
+
+	// IsAnalyze_stmtContext differentiates from other interfaces.
+	IsAnalyze_stmtContext()
+}
+
+type Analyze_stmtContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyAnalyze_stmtContext() *Analyze_stmtContext {
+	var p = new(Analyze_stmtContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_analyze_stmt
+	return p
+}
+
+func (*Analyze_stmtContext) IsAnalyze_stmtContext() {}
+
+func NewAnalyze_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Analyze_stmtContext {
+	var p = new(Analyze_stmtContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_analyze_stmt
+
+	return p
+}
+
+func (s *Analyze_stmtContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Analyze_stmtContext) ANALYZE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserANALYZE_, 0)
+}
+
+func (s *Analyze_stmtContext) Schema_name() ISchema_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ISchema_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ISchema_nameContext)
+}
+
+func (s *Analyze_stmtContext) Table_or_index_name() ITable_or_index_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ITable_or_index_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ITable_or_index_nameContext)
+}
+
+func (s *Analyze_stmtContext) DOT() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDOT, 0)
+}
+
+func (s *Analyze_stmtContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Analyze_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Analyze_stmtContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterAnalyze_stmt(s)
+	}
+}
+
+func (s *Analyze_stmtContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitAnalyze_stmt(s)
+	}
+}
+
+func (p *SQLiteParser) Analyze_stmt() (localctx IAnalyze_stmtContext) {
+	this := p
+	_ = this
+
+	localctx = NewAnalyze_stmtContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 8, SQLiteParserRULE_analyze_stmt)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(323)
+		p.Match(SQLiteParserANALYZE_)
+	}
+	p.SetState(331)
+	p.GetErrorHandler().Sync(p)
+
+	if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 15, p.GetParserRuleContext()) == 1 {
+		{
+			p.SetState(324)
+			p.Schema_name()
+		}
+
+	} else if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 15, p.GetParserRuleContext()) == 2 {
+		p.SetState(328)
+		p.GetErrorHandler().Sync(p)
+
+		if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 14, p.GetParserRuleContext()) == 1 {
+			{
+				p.SetState(325)
+				p.Schema_name()
+			}
+			{
+				p.SetState(326)
+				p.Match(SQLiteParserDOT)
+			}
+
+		}
+		{
+			p.SetState(330)
+			p.Table_or_index_name()
+		}
+
+	}
+
+	return localctx
+}
+
+// IAttach_stmtContext is an interface to support dynamic dispatch.
+type IAttach_stmtContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	ATTACH_() antlr.TerminalNode
+	Expr() IExprContext
+	AS_() antlr.TerminalNode
+	Schema_name() ISchema_nameContext
+	DATABASE_() antlr.TerminalNode
+
+	// IsAttach_stmtContext differentiates from other interfaces.
+	IsAttach_stmtContext()
+}
+
+type Attach_stmtContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyAttach_stmtContext() *Attach_stmtContext {
+	var p = new(Attach_stmtContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_attach_stmt
+	return p
+}
+
+func (*Attach_stmtContext) IsAttach_stmtContext() {}
+
+func NewAttach_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Attach_stmtContext {
+	var p = new(Attach_stmtContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_attach_stmt
+
+	return p
+}
+
+func (s *Attach_stmtContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Attach_stmtContext) ATTACH_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserATTACH_, 0)
+}
+
+func (s *Attach_stmtContext) Expr() IExprContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IExprContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IExprContext)
+}
+
+func (s *Attach_stmtContext) AS_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserAS_, 0)
+}
+
+func (s *Attach_stmtContext) Schema_name() ISchema_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ISchema_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ISchema_nameContext)
+}
+
+func (s *Attach_stmtContext) DATABASE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDATABASE_, 0)
+}
+
+func (s *Attach_stmtContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Attach_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Attach_stmtContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterAttach_stmt(s)
+	}
+}
+
+func (s *Attach_stmtContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitAttach_stmt(s)
+	}
+}
+
+func (p *SQLiteParser) Attach_stmt() (localctx IAttach_stmtContext) {
+	this := p
+	_ = this
+
+	localctx = NewAttach_stmtContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 10, SQLiteParserRULE_attach_stmt)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(333)
+		p.Match(SQLiteParserATTACH_)
+	}
+	p.SetState(335)
+	p.GetErrorHandler().Sync(p)
+
+	if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 {
+		{
+			p.SetState(334)
+			p.Match(SQLiteParserDATABASE_)
+		}
+
+	}
+	{
+		p.SetState(337)
+		p.expr(0)
+	}
+	{
+		p.SetState(338)
+		p.Match(SQLiteParserAS_)
+	}
+	{
+		p.SetState(339)
+		p.Schema_name()
+	}
+
+	return localctx
+}
+
+// IBegin_stmtContext is an interface to support dynamic dispatch.
+type IBegin_stmtContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	BEGIN_() antlr.TerminalNode
+	TRANSACTION_() antlr.TerminalNode
+	DEFERRED_() antlr.TerminalNode
+	IMMEDIATE_() antlr.TerminalNode
+	EXCLUSIVE_() antlr.TerminalNode
+	Transaction_name() ITransaction_nameContext
+
+	// IsBegin_stmtContext differentiates from other interfaces.
+	IsBegin_stmtContext()
+}
+
+type Begin_stmtContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyBegin_stmtContext() *Begin_stmtContext {
+	var p = new(Begin_stmtContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_begin_stmt
+	return p
+}
+
+func (*Begin_stmtContext) IsBegin_stmtContext() {}
+
+func NewBegin_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Begin_stmtContext {
+	var p = new(Begin_stmtContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_begin_stmt
+
+	return p
+}
+
+func (s *Begin_stmtContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Begin_stmtContext) BEGIN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserBEGIN_, 0)
+}
+
+func (s *Begin_stmtContext) TRANSACTION_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTRANSACTION_, 0)
+}
+
+func (s *Begin_stmtContext) DEFERRED_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDEFERRED_, 0)
+}
+
+func (s *Begin_stmtContext) IMMEDIATE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserIMMEDIATE_, 0)
+}
+
+func (s *Begin_stmtContext) EXCLUSIVE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserEXCLUSIVE_, 0)
+}
+
+func (s *Begin_stmtContext) Transaction_name() ITransaction_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ITransaction_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ITransaction_nameContext)
+}
+
+func (s *Begin_stmtContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Begin_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Begin_stmtContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterBegin_stmt(s)
+	}
+}
+
+func (s *Begin_stmtContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitBegin_stmt(s)
+	}
+}
+
+func (p *SQLiteParser) Begin_stmt() (localctx IBegin_stmtContext) {
+	this := p
+	_ = this
+
+	localctx = NewBegin_stmtContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 12, SQLiteParserRULE_begin_stmt)
+	var _la int
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(341)
+		p.Match(SQLiteParserBEGIN_)
+	}
+	p.SetState(343)
+	p.GetErrorHandler().Sync(p)
+	_la = p.GetTokenStream().LA(1)
+
+	if (int64((_la-58)) & ^0x3f) == 0 && ((int64(1)<<(_la-58))&16779265) != 0 {
+		{
+			p.SetState(342)
+			_la = p.GetTokenStream().LA(1)
+
+			if !((int64((_la-58)) & ^0x3f) == 0 && ((int64(1)<<(_la-58))&16779265) != 0) {
+				p.GetErrorHandler().RecoverInline(p)
+			} else {
+				p.GetErrorHandler().ReportMatch(p)
+				p.Consume()
+			}
+		}
+
+	}
+	p.SetState(349)
+	p.GetErrorHandler().Sync(p)
+	_la = p.GetTokenStream().LA(1)
+
+	if _la == SQLiteParserTRANSACTION_ {
+		{
+			p.SetState(345)
+			p.Match(SQLiteParserTRANSACTION_)
+		}
+		p.SetState(347)
+		p.GetErrorHandler().Sync(p)
+
+		if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 18, p.GetParserRuleContext()) == 1 {
+			{
+				p.SetState(346)
+				p.Transaction_name()
+			}
+
+		}
+
+	}
+
+	return localctx
+}
+
+// ICommit_stmtContext is an interface to support dynamic dispatch.
+type ICommit_stmtContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	COMMIT_() antlr.TerminalNode
+	END_() antlr.TerminalNode
+	TRANSACTION_() antlr.TerminalNode
+
+	// IsCommit_stmtContext differentiates from other interfaces.
+	IsCommit_stmtContext()
+}
+
+type Commit_stmtContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyCommit_stmtContext() *Commit_stmtContext {
+	var p = new(Commit_stmtContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_commit_stmt
+	return p
+}
+
+func (*Commit_stmtContext) IsCommit_stmtContext() {}
+
+func NewCommit_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Commit_stmtContext {
+	var p = new(Commit_stmtContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_commit_stmt
+
+	return p
+}
+
+func (s *Commit_stmtContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Commit_stmtContext) COMMIT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCOMMIT_, 0)
+}
+
+func (s *Commit_stmtContext) END_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserEND_, 0)
+}
+
+func (s *Commit_stmtContext) TRANSACTION_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTRANSACTION_, 0)
+}
+
+func (s *Commit_stmtContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Commit_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Commit_stmtContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterCommit_stmt(s)
+	}
+}
+
+func (s *Commit_stmtContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitCommit_stmt(s)
+	}
+}
+
+func (p *SQLiteParser) Commit_stmt() (localctx ICommit_stmtContext) {
+	this := p
+	_ = this
+
+	localctx = NewCommit_stmtContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 14, SQLiteParserRULE_commit_stmt)
+	var _la int
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(351)
+		_la = p.GetTokenStream().LA(1)
+
+		if !(_la == SQLiteParserCOMMIT_ || _la == SQLiteParserEND_) {
+			p.GetErrorHandler().RecoverInline(p)
+		} else {
+			p.GetErrorHandler().ReportMatch(p)
+			p.Consume()
+		}
+	}
+	p.SetState(353)
+	p.GetErrorHandler().Sync(p)
+	_la = p.GetTokenStream().LA(1)
+
+	if _la == SQLiteParserTRANSACTION_ {
+		{
+			p.SetState(352)
+			p.Match(SQLiteParserTRANSACTION_)
+		}
+
+	}
+
+	return localctx
+}
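The transaction-control rules above are permissive: `Begin_stmt` matches `DEFERRED`/`IMMEDIATE`/`EXCLUSIVE` as a single token set, and `Commit_stmt` accepts either `COMMIT` or `END`, with `TRANSACTION` optional in both. A quick sketch of inputs they all accept, reusing the assumed constructors from the driver example:

```go
// Each string exercises Begin_stmt or Commit_stmt; none should produce a
// syntax error, because the keyword variants are alternatives of one rule.
for _, sql := range []string{"BEGIN;", "BEGIN DEFERRED TRANSACTION;", "COMMIT;", "END TRANSACTION;"} {
	p := sqliteparser.NewSQLiteParser(antlr.NewCommonTokenStream(
		sqliteparser.NewSQLiteLexer(antlr.NewInputStream(sql)), antlr.TokenDefaultChannel))
	_ = p.Parse()
}
```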
+
+// IRollback_stmtContext is an interface to support dynamic dispatch.
+type IRollback_stmtContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	ROLLBACK_() antlr.TerminalNode
+	TRANSACTION_() antlr.TerminalNode
+	TO_() antlr.TerminalNode
+	Savepoint_name() ISavepoint_nameContext
+	SAVEPOINT_() antlr.TerminalNode
+
+	// IsRollback_stmtContext differentiates from other interfaces.
+	IsRollback_stmtContext()
+}
+
+type Rollback_stmtContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyRollback_stmtContext() *Rollback_stmtContext {
+	var p = new(Rollback_stmtContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_rollback_stmt
+	return p
+}
+
+func (*Rollback_stmtContext) IsRollback_stmtContext() {}
+
+func NewRollback_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Rollback_stmtContext {
+	var p = new(Rollback_stmtContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_rollback_stmt
+
+	return p
+}
+
+func (s *Rollback_stmtContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Rollback_stmtContext) ROLLBACK_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserROLLBACK_, 0)
+}
+
+func (s *Rollback_stmtContext) TRANSACTION_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTRANSACTION_, 0)
+}
+
+func (s *Rollback_stmtContext) TO_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTO_, 0)
+}
+
+func (s *Rollback_stmtContext) Savepoint_name() ISavepoint_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ISavepoint_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ISavepoint_nameContext)
+}
+
+func (s *Rollback_stmtContext) SAVEPOINT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserSAVEPOINT_, 0)
+}
+
+func (s *Rollback_stmtContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Rollback_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Rollback_stmtContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterRollback_stmt(s)
+	}
+}
+
+func (s *Rollback_stmtContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitRollback_stmt(s)
+	}
+}
+
+func (p *SQLiteParser) Rollback_stmt() (localctx IRollback_stmtContext) {
+	this := p
+	_ = this
+
+	localctx = NewRollback_stmtContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 16, SQLiteParserRULE_rollback_stmt)
+	var _la int
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(355)
+		p.Match(SQLiteParserROLLBACK_)
+	}
+	p.SetState(357)
+	p.GetErrorHandler().Sync(p)
+	_la = p.GetTokenStream().LA(1)
+
+	if _la == SQLiteParserTRANSACTION_ {
+		{
+			p.SetState(356)
+			p.Match(SQLiteParserTRANSACTION_)
+		}
+
+	}
+	p.SetState(364)
+	p.GetErrorHandler().Sync(p)
+	_la = p.GetTokenStream().LA(1)
+
+	if _la == SQLiteParserTO_ {
+		{
+			p.SetState(359)
+			p.Match(SQLiteParserTO_)
+		}
+		p.SetState(361)
+		p.GetErrorHandler().Sync(p)
+
+		if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 22, p.GetParserRuleContext()) == 1 {
+			{
+				p.SetState(360)
+				p.Match(SQLiteParserSAVEPOINT_)
+			}
+
+		}
+		{
+			p.SetState(363)
+			p.Savepoint_name()
+		}
+
+	}
+
+	return localctx
+}
+
+// ISavepoint_stmtContext is an interface to support dynamic dispatch.
+type ISavepoint_stmtContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	SAVEPOINT_() antlr.TerminalNode
+	Savepoint_name() ISavepoint_nameContext
+
+	// IsSavepoint_stmtContext differentiates from other interfaces.
+	IsSavepoint_stmtContext()
+}
+
+type Savepoint_stmtContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptySavepoint_stmtContext() *Savepoint_stmtContext {
+	var p = new(Savepoint_stmtContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_savepoint_stmt
+	return p
+}
+
+func (*Savepoint_stmtContext) IsSavepoint_stmtContext() {}
+
+func NewSavepoint_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Savepoint_stmtContext {
+	var p = new(Savepoint_stmtContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_savepoint_stmt
+
+	return p
+}
+
+func (s *Savepoint_stmtContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Savepoint_stmtContext) SAVEPOINT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserSAVEPOINT_, 0)
+}
+
+func (s *Savepoint_stmtContext) Savepoint_name() ISavepoint_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ISavepoint_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ISavepoint_nameContext)
+}
+
+func (s *Savepoint_stmtContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Savepoint_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Savepoint_stmtContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterSavepoint_stmt(s)
+	}
+}
+
+func (s *Savepoint_stmtContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitSavepoint_stmt(s)
+	}
+}
+
+func (p *SQLiteParser) Savepoint_stmt() (localctx ISavepoint_stmtContext) {
+	this := p
+	_ = this
+
+	localctx = NewSavepoint_stmtContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 18, SQLiteParserRULE_savepoint_stmt)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(366)
+		p.Match(SQLiteParserSAVEPOINT_)
+	}
+	{
+		p.SetState(367)
+		p.Savepoint_name()
+	}
+
+	return localctx
+}
+
+// IRelease_stmtContext is an interface to support dynamic dispatch.
+type IRelease_stmtContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	RELEASE_() antlr.TerminalNode
+	Savepoint_name() ISavepoint_nameContext
+	SAVEPOINT_() antlr.TerminalNode
+
+	// IsRelease_stmtContext differentiates from other interfaces.
+	IsRelease_stmtContext()
+}
+
+type Release_stmtContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyRelease_stmtContext() *Release_stmtContext {
+	var p = new(Release_stmtContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_release_stmt
+	return p
+}
+
+func (*Release_stmtContext) IsRelease_stmtContext() {}
+
+func NewRelease_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Release_stmtContext {
+	var p = new(Release_stmtContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_release_stmt
+
+	return p
+}
+
+func (s *Release_stmtContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Release_stmtContext) RELEASE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserRELEASE_, 0)
+}
+
+func (s *Release_stmtContext) Savepoint_name() ISavepoint_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ISavepoint_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ISavepoint_nameContext)
+}
+
+func (s *Release_stmtContext) SAVEPOINT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserSAVEPOINT_, 0)
+}
+
+func (s *Release_stmtContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Release_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Release_stmtContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterRelease_stmt(s)
+	}
+}
+
+func (s *Release_stmtContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitRelease_stmt(s)
+	}
+}
+
+func (p *SQLiteParser) Release_stmt() (localctx IRelease_stmtContext) {
+	this := p
+	_ = this
+
+	localctx = NewRelease_stmtContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 20, SQLiteParserRULE_release_stmt)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(369)
+		p.Match(SQLiteParserRELEASE_)
+	}
+	p.SetState(371)
+	p.GetErrorHandler().Sync(p)
+
+	if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 24, p.GetParserRuleContext()) == 1 {
+		{
+			p.SetState(370)
+			p.Match(SQLiteParserSAVEPOINT_)
+		}
+
+	}
+	{
+		p.SetState(373)
+		p.Savepoint_name()
+	}
+
+	return localctx
+}
+
+// ICreate_index_stmtContext is an interface to support dynamic dispatch.
+type ICreate_index_stmtContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	CREATE_() antlr.TerminalNode
+	INDEX_() antlr.TerminalNode
+	Index_name() IIndex_nameContext
+	ON_() antlr.TerminalNode
+	Table_name() ITable_nameContext
+	OPEN_PAR() antlr.TerminalNode
+	AllIndexed_column() []IIndexed_columnContext
+	Indexed_column(i int) IIndexed_columnContext
+	CLOSE_PAR() antlr.TerminalNode
+	UNIQUE_() antlr.TerminalNode
+	IF_() antlr.TerminalNode
+	NOT_() antlr.TerminalNode
+	EXISTS_() antlr.TerminalNode
+	Schema_name() ISchema_nameContext
+	DOT() antlr.TerminalNode
+	AllCOMMA() []antlr.TerminalNode
+	COMMA(i int) antlr.TerminalNode
+	WHERE_() antlr.TerminalNode
+	Expr() IExprContext
+
+	// IsCreate_index_stmtContext differentiates from other interfaces.
+	IsCreate_index_stmtContext()
+}
+
+type Create_index_stmtContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyCreate_index_stmtContext() *Create_index_stmtContext {
+	var p = new(Create_index_stmtContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_create_index_stmt
+	return p
+}
+
+func (*Create_index_stmtContext) IsCreate_index_stmtContext() {}
+
+func NewCreate_index_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Create_index_stmtContext {
+	var p = new(Create_index_stmtContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_create_index_stmt
+
+	return p
+}
+
+func (s *Create_index_stmtContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Create_index_stmtContext) CREATE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCREATE_, 0)
+}
+
+func (s *Create_index_stmtContext) INDEX_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserINDEX_, 0)
+}
+
+func (s *Create_index_stmtContext) Index_name() IIndex_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IIndex_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IIndex_nameContext)
+}
+
+func (s *Create_index_stmtContext) ON_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserON_, 0)
+}
+
+func (s *Create_index_stmtContext) Table_name() ITable_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ITable_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ITable_nameContext)
+}
+
+func (s *Create_index_stmtContext) OPEN_PAR() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserOPEN_PAR, 0)
+}
+
+func (s *Create_index_stmtContext) AllIndexed_column() []IIndexed_columnContext {
+	children := s.GetChildren()
+	len := 0
+	for _, ctx := range children {
+		if _, ok := ctx.(IIndexed_columnContext); ok {
+			len++
+		}
+	}
+
+	tst := make([]IIndexed_columnContext, len)
+	i := 0
+	for _, ctx := range children {
+		if t, ok := ctx.(IIndexed_columnContext); ok {
+			tst[i] = t.(IIndexed_columnContext)
+			i++
+		}
+	}
+
+	return tst
+}
+
+func (s *Create_index_stmtContext) Indexed_column(i int) IIndexed_columnContext {
+	var t antlr.RuleContext
+	j := 0
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IIndexed_columnContext); ok {
+			if j == i {
+				t = ctx.(antlr.RuleContext)
+				break
+			}
+			j++
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IIndexed_columnContext)
+}
+
+func (s *Create_index_stmtContext) CLOSE_PAR() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCLOSE_PAR, 0)
+}
+
+func (s *Create_index_stmtContext) UNIQUE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserUNIQUE_, 0)
+}
+
+func (s *Create_index_stmtContext) IF_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserIF_, 0)
+}
+
+func (s *Create_index_stmtContext) NOT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserNOT_, 0)
+}
+
+func (s *Create_index_stmtContext) EXISTS_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserEXISTS_, 0)
+}
+
+func (s *Create_index_stmtContext) Schema_name() ISchema_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(ISchema_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(ISchema_nameContext)
+}
+
+func (s *Create_index_stmtContext) DOT() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDOT, 0)
+}
+
+func (s *Create_index_stmtContext) AllCOMMA() []antlr.TerminalNode {
+	return s.GetTokens(SQLiteParserCOMMA)
+}
+
+func (s *Create_index_stmtContext) COMMA(i int) antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCOMMA, i)
+}
+
+func (s *Create_index_stmtContext) WHERE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserWHERE_, 0)
+}
+
+func (s *Create_index_stmtContext) Expr() IExprContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IExprContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IExprContext)
+}
+
+func (s *Create_index_stmtContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Create_index_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Create_index_stmtContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterCreate_index_stmt(s)
+	}
+}
+
+func (s *Create_index_stmtContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitCreate_index_stmt(s)
+	}
+}
+
+func (p *SQLiteParser) Create_index_stmt() (localctx ICreate_index_stmtContext) {
+	this := p
+	_ = this
+
+	localctx = NewCreate_index_stmtContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 22, SQLiteParserRULE_create_index_stmt)
+	var _la int
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(375)
+		p.Match(SQLiteParserCREATE_)
+	}
+	p.SetState(377)
+	p.GetErrorHandler().Sync(p)
+	_la = p.GetTokenStream().LA(1)
+
+	if _la == SQLiteParserUNIQUE_ {
+		{
+			p.SetState(376)
+			p.Match(SQLiteParserUNIQUE_)
+		}
+
+	}
+	{
+		p.SetState(379)
+		p.Match(SQLiteParserINDEX_)
+	}
+	p.SetState(383)
+	p.GetErrorHandler().Sync(p)
+
+	if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 26, p.GetParserRuleContext()) == 1 {
+		{
+			p.SetState(380)
+			p.Match(SQLiteParserIF_)
+		}
+		{
+			p.SetState(381)
+			p.Match(SQLiteParserNOT_)
+		}
+		{
+			p.SetState(382)
+			p.Match(SQLiteParserEXISTS_)
+		}
+
+	}
+	p.SetState(388)
+	p.GetErrorHandler().Sync(p)
+
+	if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 27, p.GetParserRuleContext()) == 1 {
+		{
+			p.SetState(385)
+			p.Schema_name()
+		}
+		{
+			p.SetState(386)
+			p.Match(SQLiteParserDOT)
+		}
+
+	}
+	{
+		p.SetState(390)
+		p.Index_name()
+	}
+	{
+		p.SetState(391)
+		p.Match(SQLiteParserON_)
+	}
+	{
+		p.SetState(392)
+		p.Table_name()
+	}
+	{
+		p.SetState(393)
+		p.Match(SQLiteParserOPEN_PAR)
+	}
+	{
+		p.SetState(394)
+		p.Indexed_column()
+	}
+	p.SetState(399)
+	p.GetErrorHandler().Sync(p)
+	_la = p.GetTokenStream().LA(1)
+
+	for _la == SQLiteParserCOMMA {
+		{
+			p.SetState(395)
+			p.Match(SQLiteParserCOMMA)
+		}
+		{
+			p.SetState(396)
+			p.Indexed_column()
+		}
+
+		p.SetState(401)
+		p.GetErrorHandler().Sync(p)
+		_la = p.GetTokenStream().LA(1)
+	}
+	{
+		p.SetState(402)
+		p.Match(SQLiteParserCLOSE_PAR)
+	}
+	p.SetState(405)
+	p.GetErrorHandler().Sync(p)
+	_la = p.GetTokenStream().LA(1)
+
+	if _la == SQLiteParserWHERE_ {
+		{
+			p.SetState(403)
+			p.Match(SQLiteParserWHERE_)
+		}
+		{
+			p.SetState(404)
+			p.expr(0)
+		}
+
+	}
+
+	return localctx
+}
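This rule accepts the full `CREATE [UNIQUE] INDEX [IF NOT EXISTS] [schema.]name ON table (col, ...) [WHERE expr]` shape, collecting each column as an `Indexed_column` child. A listener can walk that list through the `AllIndexed_column` accessor; a minimal sketch under the same assumptions as the earlier examples:

```go
// indexInspector prints the column list of each CREATE INDEX statement,
// iterating the repeated Indexed_column children gathered above.
type indexInspector struct {
	sqliteparser.BaseSQLiteParserListener // assumed base-listener name
}

func (ii *indexInspector) EnterCreate_index_stmt(ctx *sqliteparser.Create_index_stmtContext) {
	fmt.Printf("index %s on %s (unique=%v):\n",
		ctx.Index_name().GetText(), ctx.Table_name().GetText(), ctx.UNIQUE_() != nil)
	for _, col := range ctx.AllIndexed_column() {
		fmt.Println("  -", col.GetText())
	}
}
```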
antlr.RuleContext { + return s +} + +func (s *Indexed_columnContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Indexed_columnContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterIndexed_column(s) + } +} + +func (s *Indexed_columnContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitIndexed_column(s) + } +} + +func (p *SQLiteParser) Indexed_column() (localctx IIndexed_columnContext) { + this := p + _ = this + + localctx = NewIndexed_columnContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 24, SQLiteParserRULE_indexed_column) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(409) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 30, p.GetParserRuleContext()) { + case 1: + { + p.SetState(407) + p.Column_name() + } + + case 2: + { + p.SetState(408) + p.expr(0) + } + + } + p.SetState(413) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserCOLLATE_ { + { + p.SetState(411) + p.Match(SQLiteParserCOLLATE_) + } + { + p.SetState(412) + p.Collation_name() + } + + } + p.SetState(416) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserASC_ || _la == SQLiteParserDESC_ { + { + p.SetState(415) + p.Asc_desc() + } + + } + + return localctx +} + +// ICreate_table_stmtContext is an interface to support dynamic dispatch. +type ICreate_table_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetRow_ROW_ID returns the row_ROW_ID token. + GetRow_ROW_ID() antlr.Token + + // SetRow_ROW_ID sets the row_ROW_ID token. + SetRow_ROW_ID(antlr.Token) + + // Getter signatures + CREATE_() antlr.TerminalNode + TABLE_() antlr.TerminalNode + Table_name() ITable_nameContext + OPEN_PAR() antlr.TerminalNode + AllColumn_def() []IColumn_defContext + Column_def(i int) IColumn_defContext + CLOSE_PAR() antlr.TerminalNode + AS_() antlr.TerminalNode + Select_stmt() ISelect_stmtContext + IF_() antlr.TerminalNode + NOT_() antlr.TerminalNode + EXISTS_() antlr.TerminalNode + Schema_name() ISchema_nameContext + DOT() antlr.TerminalNode + TEMP_() antlr.TerminalNode + TEMPORARY_() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + AllTable_constraint() []ITable_constraintContext + Table_constraint(i int) ITable_constraintContext + WITHOUT_() antlr.TerminalNode + IDENTIFIER() antlr.TerminalNode + + // IsCreate_table_stmtContext differentiates from other interfaces. 
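+// Editor's note (annotation, not generator output): the getters above describe
+// SQLite's CREATE TABLE rule, i.e.
+//   CREATE [TEMP|TEMPORARY] TABLE [IF NOT EXISTS] [schema.]name
+//     ( column_def [, ...] [, table_constraint ...] ) [WITHOUT ROWID]
+//   | ... AS select_stmt
+// The WITHOUT ROWID suffix is captured as WITHOUT_ plus the row_ROW_ID token.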
+ IsCreate_table_stmtContext() +} + +type Create_table_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser + row_ROW_ID antlr.Token +} + +func NewEmptyCreate_table_stmtContext() *Create_table_stmtContext { + var p = new(Create_table_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_create_table_stmt + return p +} + +func (*Create_table_stmtContext) IsCreate_table_stmtContext() {} + +func NewCreate_table_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Create_table_stmtContext { + var p = new(Create_table_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_create_table_stmt + + return p +} + +func (s *Create_table_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Create_table_stmtContext) GetRow_ROW_ID() antlr.Token { return s.row_ROW_ID } + +func (s *Create_table_stmtContext) SetRow_ROW_ID(v antlr.Token) { s.row_ROW_ID = v } + +func (s *Create_table_stmtContext) CREATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCREATE_, 0) +} + +func (s *Create_table_stmtContext) TABLE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTABLE_, 0) +} + +func (s *Create_table_stmtContext) Table_name() ITable_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_nameContext) +} + +func (s *Create_table_stmtContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Create_table_stmtContext) AllColumn_def() []IColumn_defContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_defContext); ok { + len++ + } + } + + tst := make([]IColumn_defContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_defContext); ok { + tst[i] = t.(IColumn_defContext) + i++ + } + } + + return tst +} + +func (s *Create_table_stmtContext) Column_def(i int) IColumn_defContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_defContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_defContext) +} + +func (s *Create_table_stmtContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Create_table_stmtContext) AS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, 0) +} + +func (s *Create_table_stmtContext) Select_stmt() ISelect_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISelect_stmtContext) +} + +func (s *Create_table_stmtContext) IF_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIF_, 0) +} + +func (s *Create_table_stmtContext) NOT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_, 0) +} + +func (s *Create_table_stmtContext) EXISTS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserEXISTS_, 0) +} + +func (s *Create_table_stmtContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = 
ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *Create_table_stmtContext) DOT() antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, 0) +} + +func (s *Create_table_stmtContext) TEMP_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTEMP_, 0) +} + +func (s *Create_table_stmtContext) TEMPORARY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTEMPORARY_, 0) +} + +func (s *Create_table_stmtContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Create_table_stmtContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Create_table_stmtContext) AllTable_constraint() []ITable_constraintContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ITable_constraintContext); ok { + len++ + } + } + + tst := make([]ITable_constraintContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ITable_constraintContext); ok { + tst[i] = t.(ITable_constraintContext) + i++ + } + } + + return tst +} + +func (s *Create_table_stmtContext) Table_constraint(i int) ITable_constraintContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_constraintContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ITable_constraintContext) +} + +func (s *Create_table_stmtContext) WITHOUT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserWITHOUT_, 0) +} + +func (s *Create_table_stmtContext) IDENTIFIER() antlr.TerminalNode { + return s.GetToken(SQLiteParserIDENTIFIER, 0) +} + +func (s *Create_table_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Create_table_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Create_table_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterCreate_table_stmt(s) + } +} + +func (s *Create_table_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitCreate_table_stmt(s) + } +} + +func (p *SQLiteParser) Create_table_stmt() (localctx ICreate_table_stmtContext) { + this := p + _ = this + + localctx = NewCreate_table_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 26, SQLiteParserRULE_create_table_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(418) + p.Match(SQLiteParserCREATE_) + } + p.SetState(420) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_ { + { + p.SetState(419) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + } + { + p.SetState(422) + p.Match(SQLiteParserTABLE_) + } + p.SetState(426) + p.GetErrorHandler().Sync(p) + + if 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 34, p.GetParserRuleContext()) == 1 { + { + p.SetState(423) + p.Match(SQLiteParserIF_) + } + { + p.SetState(424) + p.Match(SQLiteParserNOT_) + } + { + p.SetState(425) + p.Match(SQLiteParserEXISTS_) + } + + } + p.SetState(431) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 35, p.GetParserRuleContext()) == 1 { + { + p.SetState(428) + p.Schema_name() + } + { + p.SetState(429) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(433) + p.Table_name() + } + p.SetState(457) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserOPEN_PAR: + { + p.SetState(434) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(435) + p.Column_def() + } + p.SetState(440) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 36, p.GetParserRuleContext()) + + for _alt != 1 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1+1 { + { + p.SetState(436) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(437) + p.Column_def() + } + + } + p.SetState(442) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 36, p.GetParserRuleContext()) + } + p.SetState(447) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(443) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(444) + p.Table_constraint() + } + + p.SetState(449) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(450) + p.Match(SQLiteParserCLOSE_PAR) + } + p.SetState(453) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWITHOUT_ { + { + p.SetState(451) + p.Match(SQLiteParserWITHOUT_) + } + { + p.SetState(452) + + var _m = p.Match(SQLiteParserIDENTIFIER) + + localctx.(*Create_table_stmtContext).row_ROW_ID = _m + } + + } + + case SQLiteParserAS_: + { + p.SetState(455) + p.Match(SQLiteParserAS_) + } + { + p.SetState(456) + p.Select_stmt() + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + return localctx +} + +// IColumn_defContext is an interface to support dynamic dispatch. +type IColumn_defContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Column_name() IColumn_nameContext + Type_name() IType_nameContext + AllColumn_constraint() []IColumn_constraintContext + Column_constraint(i int) IColumn_constraintContext + + // IsColumn_defContext differentiates from other interfaces. 
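+// Editor's note (annotation, not generator output): column_def is
+//   column_name [type_name] [column_constraint ...]
+// e.g. `price NUMERIC(10,2) NOT NULL DEFAULT 0`; both the type and the
+// constraints are optional, so Type_name() can legitimately return nil.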
+ IsColumn_defContext() +} + +type Column_defContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyColumn_defContext() *Column_defContext { + var p = new(Column_defContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_column_def + return p +} + +func (*Column_defContext) IsColumn_defContext() {} + +func NewColumn_defContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Column_defContext { + var p = new(Column_defContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_column_def + + return p +} + +func (s *Column_defContext) GetParser() antlr.Parser { return s.parser } + +func (s *Column_defContext) Column_name() IColumn_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Column_defContext) Type_name() IType_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IType_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IType_nameContext) +} + +func (s *Column_defContext) AllColumn_constraint() []IColumn_constraintContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_constraintContext); ok { + len++ + } + } + + tst := make([]IColumn_constraintContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_constraintContext); ok { + tst[i] = t.(IColumn_constraintContext) + i++ + } + } + + return tst +} + +func (s *Column_defContext) Column_constraint(i int) IColumn_constraintContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_constraintContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_constraintContext) +} + +func (s *Column_defContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Column_defContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Column_defContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterColumn_def(s) + } +} + +func (s *Column_defContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitColumn_def(s) + } +} + +func (p *SQLiteParser) Column_def() (localctx IColumn_defContext) { + this := p + _ = this + + localctx = NewColumn_defContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 28, SQLiteParserRULE_column_def) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(459) + p.Column_name() + } + p.SetState(461) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 40, p.GetParserRuleContext()) == 1 { + { + p.SetState(460) + p.Type_name() + 
} + + } + p.SetState(466) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for ((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&72673329139417088) != 0) || ((int64((_la-102)) & ^0x3f) == 0 && ((int64(1)<<(_la-102))&274877941765) != 0) || _la == SQLiteParserGENERATED_ { + { + p.SetState(463) + p.Column_constraint() + } + + p.SetState(468) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + return localctx +} + +// IType_nameContext is an interface to support dynamic dispatch. +type IType_nameContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + AllName() []INameContext + Name(i int) INameContext + OPEN_PAR() antlr.TerminalNode + AllSigned_number() []ISigned_numberContext + Signed_number(i int) ISigned_numberContext + CLOSE_PAR() antlr.TerminalNode + COMMA() antlr.TerminalNode + + // IsType_nameContext differentiates from other interfaces. + IsType_nameContext() +} + +type Type_nameContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyType_nameContext() *Type_nameContext { + var p = new(Type_nameContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_type_name + return p +} + +func (*Type_nameContext) IsType_nameContext() {} + +func NewType_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Type_nameContext { + var p = new(Type_nameContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_type_name + + return p +} + +func (s *Type_nameContext) GetParser() antlr.Parser { return s.parser } + +func (s *Type_nameContext) AllName() []INameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(INameContext); ok { + len++ + } + } + + tst := make([]INameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(INameContext); ok { + tst[i] = t.(INameContext) + i++ + } + } + + return tst +} + +func (s *Type_nameContext) Name(i int) INameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(INameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(INameContext) +} + +func (s *Type_nameContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Type_nameContext) AllSigned_number() []ISigned_numberContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ISigned_numberContext); ok { + len++ + } + } + + tst := make([]ISigned_numberContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ISigned_numberContext); ok { + tst[i] = t.(ISigned_numberContext) + i++ + } + } + + return tst +} + +func (s *Type_nameContext) Signed_number(i int) ISigned_numberContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISigned_numberContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ISigned_numberContext) +} + +func (s *Type_nameContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Type_nameContext) COMMA() antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, 0) +} + +func (s *Type_nameContext) GetRuleContext() 
antlr.RuleContext { + return s +} + +func (s *Type_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Type_nameContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterType_name(s) + } +} + +func (s *Type_nameContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitType_name(s) + } +} + +func (p *SQLiteParser) Type_name() (localctx IType_nameContext) { + this := p + _ = this + + localctx = NewType_nameContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 30, SQLiteParserRULE_type_name) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + var _alt int + + p.EnterOuterAlt(localctx, 1) + p.SetState(470) + p.GetErrorHandler().Sync(p) + _alt = 1 + 1 + for ok := true; ok; ok = _alt != 1 && _alt != antlr.ATNInvalidAltNumber { + switch _alt { + case 1 + 1: + { + p.SetState(469) + p.Name() + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + p.SetState(472) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 42, p.GetParserRuleContext()) + } + p.SetState(484) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 43, p.GetParserRuleContext()) == 1 { + { + p.SetState(474) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(475) + p.Signed_number() + } + { + p.SetState(476) + p.Match(SQLiteParserCLOSE_PAR) + } + + } else if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 43, p.GetParserRuleContext()) == 2 { + { + p.SetState(478) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(479) + p.Signed_number() + } + { + p.SetState(480) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(481) + p.Signed_number() + } + { + p.SetState(482) + p.Match(SQLiteParserCLOSE_PAR) + } + + } + + return localctx +} + +// IColumn_constraintContext is an interface to support dynamic dispatch. +type IColumn_constraintContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + CHECK_() antlr.TerminalNode + OPEN_PAR() antlr.TerminalNode + Expr() IExprContext + CLOSE_PAR() antlr.TerminalNode + DEFAULT_() antlr.TerminalNode + COLLATE_() antlr.TerminalNode + Collation_name() ICollation_nameContext + Foreign_key_clause() IForeign_key_clauseContext + AS_() antlr.TerminalNode + CONSTRAINT_() antlr.TerminalNode + Name() INameContext + PRIMARY_() antlr.TerminalNode + KEY_() antlr.TerminalNode + NULL_() antlr.TerminalNode + UNIQUE_() antlr.TerminalNode + Signed_number() ISigned_numberContext + Literal_value() ILiteral_valueContext + Conflict_clause() IConflict_clauseContext + GENERATED_() antlr.TerminalNode + ALWAYS_() antlr.TerminalNode + STORED_() antlr.TerminalNode + VIRTUAL_() antlr.TerminalNode + Asc_desc() IAsc_descContext + AUTOINCREMENT_() antlr.TerminalNode + NOT_() antlr.TerminalNode + + // IsColumn_constraintContext differentiates from other interfaces. 
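+// Editor's note (annotation, not generator output): a column_constraint is an
+// optional `CONSTRAINT name` prefix followed by one of:
+//   PRIMARY KEY [ASC|DESC] [conflict_clause] [AUTOINCREMENT]
+//   [NOT] NULL [conflict_clause] | UNIQUE [conflict_clause]
+//   CHECK (expr) | DEFAULT {signed_number|literal|(expr)} | COLLATE name
+//   REFERENCES ... | [GENERATED ALWAYS] AS (expr) [STORED|VIRTUAL]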
+ IsColumn_constraintContext() +} + +type Column_constraintContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyColumn_constraintContext() *Column_constraintContext { + var p = new(Column_constraintContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_column_constraint + return p +} + +func (*Column_constraintContext) IsColumn_constraintContext() {} + +func NewColumn_constraintContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Column_constraintContext { + var p = new(Column_constraintContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_column_constraint + + return p +} + +func (s *Column_constraintContext) GetParser() antlr.Parser { return s.parser } + +func (s *Column_constraintContext) CHECK_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCHECK_, 0) +} + +func (s *Column_constraintContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Column_constraintContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Column_constraintContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Column_constraintContext) DEFAULT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDEFAULT_, 0) +} + +func (s *Column_constraintContext) COLLATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCOLLATE_, 0) +} + +func (s *Column_constraintContext) Collation_name() ICollation_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICollation_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ICollation_nameContext) +} + +func (s *Column_constraintContext) Foreign_key_clause() IForeign_key_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IForeign_key_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IForeign_key_clauseContext) +} + +func (s *Column_constraintContext) AS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, 0) +} + +func (s *Column_constraintContext) CONSTRAINT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCONSTRAINT_, 0) +} + +func (s *Column_constraintContext) Name() INameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(INameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(INameContext) +} + +func (s *Column_constraintContext) PRIMARY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserPRIMARY_, 0) +} + +func (s *Column_constraintContext) KEY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserKEY_, 0) +} + +func (s *Column_constraintContext) NULL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNULL_, 0) +} + +func (s *Column_constraintContext) UNIQUE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUNIQUE_, 0) +} + +func (s *Column_constraintContext) Signed_number() ISigned_numberContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISigned_numberContext); ok { + t = 
ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISigned_numberContext) +} + +func (s *Column_constraintContext) Literal_value() ILiteral_valueContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ILiteral_valueContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ILiteral_valueContext) +} + +func (s *Column_constraintContext) Conflict_clause() IConflict_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IConflict_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IConflict_clauseContext) +} + +func (s *Column_constraintContext) GENERATED_() antlr.TerminalNode { + return s.GetToken(SQLiteParserGENERATED_, 0) +} + +func (s *Column_constraintContext) ALWAYS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserALWAYS_, 0) +} + +func (s *Column_constraintContext) STORED_() antlr.TerminalNode { + return s.GetToken(SQLiteParserSTORED_, 0) +} + +func (s *Column_constraintContext) VIRTUAL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserVIRTUAL_, 0) +} + +func (s *Column_constraintContext) Asc_desc() IAsc_descContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAsc_descContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAsc_descContext) +} + +func (s *Column_constraintContext) AUTOINCREMENT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAUTOINCREMENT_, 0) +} + +func (s *Column_constraintContext) NOT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_, 0) +} + +func (s *Column_constraintContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Column_constraintContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Column_constraintContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterColumn_constraint(s) + } +} + +func (s *Column_constraintContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitColumn_constraint(s) + } +} + +func (p *SQLiteParser) Column_constraint() (localctx IColumn_constraintContext) { + this := p + _ = this + + localctx = NewColumn_constraintContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 32, SQLiteParserRULE_column_constraint) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(488) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserCONSTRAINT_ { + { + p.SetState(486) + p.Match(SQLiteParserCONSTRAINT_) + } + { + p.SetState(487) + p.Name() + } + + } + p.SetState(539) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserPRIMARY_: + { + p.SetState(490) + p.Match(SQLiteParserPRIMARY_) + } + { + p.SetState(491) + p.Match(SQLiteParserKEY_) + } + p.SetState(493) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserASC_ || _la == 
SQLiteParserDESC_ { + { + p.SetState(492) + p.Asc_desc() + } + + } + p.SetState(496) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserON_ { + { + p.SetState(495) + p.Conflict_clause() + } + + } + p.SetState(499) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserAUTOINCREMENT_ { + { + p.SetState(498) + p.Match(SQLiteParserAUTOINCREMENT_) + } + + } + + case SQLiteParserNOT_, SQLiteParserNULL_, SQLiteParserUNIQUE_: + p.SetState(506) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserNOT_, SQLiteParserNULL_: + p.SetState(502) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserNOT_ { + { + p.SetState(501) + p.Match(SQLiteParserNOT_) + } + + } + { + p.SetState(504) + p.Match(SQLiteParserNULL_) + } + + case SQLiteParserUNIQUE_: + { + p.SetState(505) + p.Match(SQLiteParserUNIQUE_) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + p.SetState(509) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserON_ { + { + p.SetState(508) + p.Conflict_clause() + } + + } + + case SQLiteParserCHECK_: + { + p.SetState(511) + p.Match(SQLiteParserCHECK_) + } + { + p.SetState(512) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(513) + p.expr(0) + } + { + p.SetState(514) + p.Match(SQLiteParserCLOSE_PAR) + } + + case SQLiteParserDEFAULT_: + { + p.SetState(516) + p.Match(SQLiteParserDEFAULT_) + } + p.SetState(523) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 51, p.GetParserRuleContext()) { + case 1: + { + p.SetState(517) + p.Signed_number() + } + + case 2: + { + p.SetState(518) + p.Literal_value() + } + + case 3: + { + p.SetState(519) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(520) + p.expr(0) + } + { + p.SetState(521) + p.Match(SQLiteParserCLOSE_PAR) + } + + } + + case SQLiteParserCOLLATE_: + { + p.SetState(525) + p.Match(SQLiteParserCOLLATE_) + } + { + p.SetState(526) + p.Collation_name() + } + + case SQLiteParserREFERENCES_: + { + p.SetState(527) + p.Foreign_key_clause() + } + + case SQLiteParserAS_, SQLiteParserGENERATED_: + p.SetState(530) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserGENERATED_ { + { + p.SetState(528) + p.Match(SQLiteParserGENERATED_) + } + { + p.SetState(529) + p.Match(SQLiteParserALWAYS_) + } + + } + { + p.SetState(532) + p.Match(SQLiteParserAS_) + } + { + p.SetState(533) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(534) + p.expr(0) + } + { + p.SetState(535) + p.Match(SQLiteParserCLOSE_PAR) + } + p.SetState(537) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserVIRTUAL_ || _la == SQLiteParserSTORED_ { + { + p.SetState(536) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserVIRTUAL_ || _la == SQLiteParserSTORED_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + return localctx +} + +// ISigned_numberContext is an interface to support dynamic dispatch. +type ISigned_numberContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
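+// Editor's note (annotation, not generator output): signed_number is just
+//   [PLUS|MINUS] NUMERIC_LITERAL
+// e.g. -42 or +3.14, as used in type sizes and DEFAULT values above.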
+ GetParser() antlr.Parser + + // Getter signatures + NUMERIC_LITERAL() antlr.TerminalNode + PLUS() antlr.TerminalNode + MINUS() antlr.TerminalNode + + // IsSigned_numberContext differentiates from other interfaces. + IsSigned_numberContext() +} + +type Signed_numberContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptySigned_numberContext() *Signed_numberContext { + var p = new(Signed_numberContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_signed_number + return p +} + +func (*Signed_numberContext) IsSigned_numberContext() {} + +func NewSigned_numberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Signed_numberContext { + var p = new(Signed_numberContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_signed_number + + return p +} + +func (s *Signed_numberContext) GetParser() antlr.Parser { return s.parser } + +func (s *Signed_numberContext) NUMERIC_LITERAL() antlr.TerminalNode { + return s.GetToken(SQLiteParserNUMERIC_LITERAL, 0) +} + +func (s *Signed_numberContext) PLUS() antlr.TerminalNode { + return s.GetToken(SQLiteParserPLUS, 0) +} + +func (s *Signed_numberContext) MINUS() antlr.TerminalNode { + return s.GetToken(SQLiteParserMINUS, 0) +} + +func (s *Signed_numberContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Signed_numberContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Signed_numberContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterSigned_number(s) + } +} + +func (s *Signed_numberContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitSigned_number(s) + } +} + +func (p *SQLiteParser) Signed_number() (localctx ISigned_numberContext) { + this := p + _ = this + + localctx = NewSigned_numberContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 34, SQLiteParserRULE_signed_number) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(542) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserPLUS || _la == SQLiteParserMINUS { + { + p.SetState(541) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserPLUS || _la == SQLiteParserMINUS) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + } + { + p.SetState(544) + p.Match(SQLiteParserNUMERIC_LITERAL) + } + + return localctx +} + +// ITable_constraintContext is an interface to support dynamic dispatch. +type ITable_constraintContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
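+// Editor's note (annotation, not generator output): table_constraint is an
+// optional `CONSTRAINT name` followed by one of:
+//   PRIMARY KEY (indexed_column, ...) [conflict_clause]
+//   UNIQUE (indexed_column, ...) [conflict_clause] | CHECK (expr)
+//   FOREIGN KEY (col, ...) foreign_key_clause
+// e.g. `CONSTRAINT fk FOREIGN KEY (user_id) REFERENCES users (id)`.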
+ GetParser() antlr.Parser + + // Getter signatures + OPEN_PAR() antlr.TerminalNode + AllIndexed_column() []IIndexed_columnContext + Indexed_column(i int) IIndexed_columnContext + CLOSE_PAR() antlr.TerminalNode + CHECK_() antlr.TerminalNode + Expr() IExprContext + FOREIGN_() antlr.TerminalNode + KEY_() antlr.TerminalNode + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + Foreign_key_clause() IForeign_key_clauseContext + CONSTRAINT_() antlr.TerminalNode + Name() INameContext + PRIMARY_() antlr.TerminalNode + UNIQUE_() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + Conflict_clause() IConflict_clauseContext + + // IsTable_constraintContext differentiates from other interfaces. + IsTable_constraintContext() +} + +type Table_constraintContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyTable_constraintContext() *Table_constraintContext { + var p = new(Table_constraintContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_table_constraint + return p +} + +func (*Table_constraintContext) IsTable_constraintContext() {} + +func NewTable_constraintContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_constraintContext { + var p = new(Table_constraintContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_table_constraint + + return p +} + +func (s *Table_constraintContext) GetParser() antlr.Parser { return s.parser } + +func (s *Table_constraintContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Table_constraintContext) AllIndexed_column() []IIndexed_columnContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IIndexed_columnContext); ok { + len++ + } + } + + tst := make([]IIndexed_columnContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IIndexed_columnContext); ok { + tst[i] = t.(IIndexed_columnContext) + i++ + } + } + + return tst +} + +func (s *Table_constraintContext) Indexed_column(i int) IIndexed_columnContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IIndexed_columnContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IIndexed_columnContext) +} + +func (s *Table_constraintContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Table_constraintContext) CHECK_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCHECK_, 0) +} + +func (s *Table_constraintContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Table_constraintContext) FOREIGN_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFOREIGN_, 0) +} + +func (s *Table_constraintContext) KEY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserKEY_, 0) +} + +func (s *Table_constraintContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children 
{ + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Table_constraintContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Table_constraintContext) Foreign_key_clause() IForeign_key_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IForeign_key_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IForeign_key_clauseContext) +} + +func (s *Table_constraintContext) CONSTRAINT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCONSTRAINT_, 0) +} + +func (s *Table_constraintContext) Name() INameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(INameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(INameContext) +} + +func (s *Table_constraintContext) PRIMARY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserPRIMARY_, 0) +} + +func (s *Table_constraintContext) UNIQUE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUNIQUE_, 0) +} + +func (s *Table_constraintContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Table_constraintContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Table_constraintContext) Conflict_clause() IConflict_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IConflict_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IConflict_clauseContext) +} + +func (s *Table_constraintContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Table_constraintContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Table_constraintContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterTable_constraint(s) + } +} + +func (s *Table_constraintContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitTable_constraint(s) + } +} + +func (p *SQLiteParser) Table_constraint() (localctx ITable_constraintContext) { + this := p + _ = this + + localctx = NewTable_constraintContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 36, SQLiteParserRULE_table_constraint) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(548) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserCONSTRAINT_ { + { + p.SetState(546) + p.Match(SQLiteParserCONSTRAINT_) + } + { + p.SetState(547) + p.Name() + } + + } + p.SetState(587) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserPRIMARY_, SQLiteParserUNIQUE_: + 
p.SetState(553) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserPRIMARY_: + { + p.SetState(550) + p.Match(SQLiteParserPRIMARY_) + } + { + p.SetState(551) + p.Match(SQLiteParserKEY_) + } + + case SQLiteParserUNIQUE_: + { + p.SetState(552) + p.Match(SQLiteParserUNIQUE_) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + { + p.SetState(555) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(556) + p.Indexed_column() + } + p.SetState(561) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(557) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(558) + p.Indexed_column() + } + + p.SetState(563) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(564) + p.Match(SQLiteParserCLOSE_PAR) + } + p.SetState(566) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserON_ { + { + p.SetState(565) + p.Conflict_clause() + } + + } + + case SQLiteParserCHECK_: + { + p.SetState(568) + p.Match(SQLiteParserCHECK_) + } + { + p.SetState(569) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(570) + p.expr(0) + } + { + p.SetState(571) + p.Match(SQLiteParserCLOSE_PAR) + } + + case SQLiteParserFOREIGN_: + { + p.SetState(573) + p.Match(SQLiteParserFOREIGN_) + } + { + p.SetState(574) + p.Match(SQLiteParserKEY_) + } + { + p.SetState(575) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(576) + p.Column_name() + } + p.SetState(581) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(577) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(578) + p.Column_name() + } + + p.SetState(583) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(584) + p.Match(SQLiteParserCLOSE_PAR) + } + { + p.SetState(585) + p.Foreign_key_clause() + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + return localctx +} + +// IForeign_key_clauseContext is an interface to support dynamic dispatch. +type IForeign_key_clauseContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
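+// Editor's note (annotation, not generator output): foreign_key_clause covers
+//   REFERENCES foreign_table [(col, ...)]
+//     { ON {DELETE|UPDATE} {SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION}
+//     | MATCH name }*
+//     [[NOT] DEFERRABLE [INITIALLY {DEFERRED|IMMEDIATE}]]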
+ GetParser() antlr.Parser + + // Getter signatures + REFERENCES_() antlr.TerminalNode + Foreign_table() IForeign_tableContext + OPEN_PAR() antlr.TerminalNode + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + CLOSE_PAR() antlr.TerminalNode + AllON_() []antlr.TerminalNode + ON_(i int) antlr.TerminalNode + AllMATCH_() []antlr.TerminalNode + MATCH_(i int) antlr.TerminalNode + AllName() []INameContext + Name(i int) INameContext + DEFERRABLE_() antlr.TerminalNode + AllDELETE_() []antlr.TerminalNode + DELETE_(i int) antlr.TerminalNode + AllUPDATE_() []antlr.TerminalNode + UPDATE_(i int) antlr.TerminalNode + AllSET_() []antlr.TerminalNode + SET_(i int) antlr.TerminalNode + AllCASCADE_() []antlr.TerminalNode + CASCADE_(i int) antlr.TerminalNode + AllRESTRICT_() []antlr.TerminalNode + RESTRICT_(i int) antlr.TerminalNode + AllNO_() []antlr.TerminalNode + NO_(i int) antlr.TerminalNode + AllACTION_() []antlr.TerminalNode + ACTION_(i int) antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + AllNULL_() []antlr.TerminalNode + NULL_(i int) antlr.TerminalNode + AllDEFAULT_() []antlr.TerminalNode + DEFAULT_(i int) antlr.TerminalNode + NOT_() antlr.TerminalNode + INITIALLY_() antlr.TerminalNode + DEFERRED_() antlr.TerminalNode + IMMEDIATE_() antlr.TerminalNode + + // IsForeign_key_clauseContext differentiates from other interfaces. + IsForeign_key_clauseContext() +} + +type Foreign_key_clauseContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyForeign_key_clauseContext() *Foreign_key_clauseContext { + var p = new(Foreign_key_clauseContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_foreign_key_clause + return p +} + +func (*Foreign_key_clauseContext) IsForeign_key_clauseContext() {} + +func NewForeign_key_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Foreign_key_clauseContext { + var p = new(Foreign_key_clauseContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_foreign_key_clause + + return p +} + +func (s *Foreign_key_clauseContext) GetParser() antlr.Parser { return s.parser } + +func (s *Foreign_key_clauseContext) REFERENCES_() antlr.TerminalNode { + return s.GetToken(SQLiteParserREFERENCES_, 0) +} + +func (s *Foreign_key_clauseContext) Foreign_table() IForeign_tableContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IForeign_tableContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IForeign_tableContext) +} + +func (s *Foreign_key_clauseContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Foreign_key_clauseContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Foreign_key_clauseContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil 
{ + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Foreign_key_clauseContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Foreign_key_clauseContext) AllON_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserON_) +} + +func (s *Foreign_key_clauseContext) ON_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserON_, i) +} + +func (s *Foreign_key_clauseContext) AllMATCH_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserMATCH_) +} + +func (s *Foreign_key_clauseContext) MATCH_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserMATCH_, i) +} + +func (s *Foreign_key_clauseContext) AllName() []INameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(INameContext); ok { + len++ + } + } + + tst := make([]INameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(INameContext); ok { + tst[i] = t.(INameContext) + i++ + } + } + + return tst +} + +func (s *Foreign_key_clauseContext) Name(i int) INameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(INameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(INameContext) +} + +func (s *Foreign_key_clauseContext) DEFERRABLE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDEFERRABLE_, 0) +} + +func (s *Foreign_key_clauseContext) AllDELETE_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserDELETE_) +} + +func (s *Foreign_key_clauseContext) DELETE_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserDELETE_, i) +} + +func (s *Foreign_key_clauseContext) AllUPDATE_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserUPDATE_) +} + +func (s *Foreign_key_clauseContext) UPDATE_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserUPDATE_, i) +} + +func (s *Foreign_key_clauseContext) AllSET_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserSET_) +} + +func (s *Foreign_key_clauseContext) SET_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserSET_, i) +} + +func (s *Foreign_key_clauseContext) AllCASCADE_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCASCADE_) +} + +func (s *Foreign_key_clauseContext) CASCADE_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCASCADE_, i) +} + +func (s *Foreign_key_clauseContext) AllRESTRICT_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserRESTRICT_) +} + +func (s *Foreign_key_clauseContext) RESTRICT_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserRESTRICT_, i) +} + +func (s *Foreign_key_clauseContext) AllNO_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserNO_) +} + +func (s *Foreign_key_clauseContext) NO_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserNO_, i) +} + +func (s *Foreign_key_clauseContext) AllACTION_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserACTION_) +} + +func (s *Foreign_key_clauseContext) ACTION_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserACTION_, i) +} + +func (s *Foreign_key_clauseContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Foreign_key_clauseContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Foreign_key_clauseContext) AllNULL_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserNULL_) +} + +func (s *Foreign_key_clauseContext) NULL_(i int) antlr.TerminalNode 
{ + return s.GetToken(SQLiteParserNULL_, i) +} + +func (s *Foreign_key_clauseContext) AllDEFAULT_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserDEFAULT_) +} + +func (s *Foreign_key_clauseContext) DEFAULT_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserDEFAULT_, i) +} + +func (s *Foreign_key_clauseContext) NOT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_, 0) +} + +func (s *Foreign_key_clauseContext) INITIALLY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserINITIALLY_, 0) +} + +func (s *Foreign_key_clauseContext) DEFERRED_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDEFERRED_, 0) +} + +func (s *Foreign_key_clauseContext) IMMEDIATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIMMEDIATE_, 0) +} + +func (s *Foreign_key_clauseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Foreign_key_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Foreign_key_clauseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterForeign_key_clause(s) + } +} + +func (s *Foreign_key_clauseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitForeign_key_clause(s) + } +} + +func (p *SQLiteParser) Foreign_key_clause() (localctx IForeign_key_clauseContext) { + this := p + _ = this + + localctx = NewForeign_key_clauseContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 38, SQLiteParserRULE_foreign_key_clause) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(589) + p.Match(SQLiteParserREFERENCES_) + } + { + p.SetState(590) + p.Foreign_table() + } + p.SetState(602) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserOPEN_PAR { + { + p.SetState(591) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(592) + p.Column_name() + } + p.SetState(597) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(593) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(594) + p.Column_name() + } + + p.SetState(599) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(600) + p.Match(SQLiteParserCLOSE_PAR) + } + + } + p.SetState(618) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserMATCH_ || _la == SQLiteParserON_ { + p.SetState(616) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserON_: + { + p.SetState(604) + p.Match(SQLiteParserON_) + } + { + p.SetState(605) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserDELETE_ || _la == SQLiteParserUPDATE_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + p.SetState(612) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserSET_: + { + p.SetState(606) + p.Match(SQLiteParserSET_) + } + { + p.SetState(607) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserDEFAULT_ || _la == SQLiteParserNULL_) { + 
p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + case SQLiteParserCASCADE_: + { + p.SetState(608) + p.Match(SQLiteParserCASCADE_) + } + + case SQLiteParserRESTRICT_: + { + p.SetState(609) + p.Match(SQLiteParserRESTRICT_) + } + + case SQLiteParserNO_: + { + p.SetState(610) + p.Match(SQLiteParserNO_) + } + { + p.SetState(611) + p.Match(SQLiteParserACTION_) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + case SQLiteParserMATCH_: + { + p.SetState(614) + p.Match(SQLiteParserMATCH_) + } + { + p.SetState(615) + p.Name() + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + p.SetState(620) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + p.SetState(629) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 69, p.GetParserRuleContext()) == 1 { + p.SetState(622) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserNOT_ { + { + p.SetState(621) + p.Match(SQLiteParserNOT_) + } + + } + { + p.SetState(624) + p.Match(SQLiteParserDEFERRABLE_) + } + p.SetState(627) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserINITIALLY_ { + { + p.SetState(625) + p.Match(SQLiteParserINITIALLY_) + } + { + p.SetState(626) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserDEFERRED_ || _la == SQLiteParserIMMEDIATE_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + } + + } + + return localctx +} + +// IConflict_clauseContext is an interface to support dynamic dispatch. +type IConflict_clauseContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + ON_() antlr.TerminalNode + CONFLICT_() antlr.TerminalNode + ROLLBACK_() antlr.TerminalNode + ABORT_() antlr.TerminalNode + FAIL_() antlr.TerminalNode + IGNORE_() antlr.TerminalNode + REPLACE_() antlr.TerminalNode + + // IsConflict_clauseContext differentiates from other interfaces. 
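+// Editor's note (annotation, not generator output): conflict_clause is
+//   ON CONFLICT {ROLLBACK|ABORT|FAIL|IGNORE|REPLACE}
+// e.g. `name TEXT UNIQUE ON CONFLICT REPLACE`.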
+ IsConflict_clauseContext() +} + +type Conflict_clauseContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyConflict_clauseContext() *Conflict_clauseContext { + var p = new(Conflict_clauseContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_conflict_clause + return p +} + +func (*Conflict_clauseContext) IsConflict_clauseContext() {} + +func NewConflict_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Conflict_clauseContext { + var p = new(Conflict_clauseContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_conflict_clause + + return p +} + +func (s *Conflict_clauseContext) GetParser() antlr.Parser { return s.parser } + +func (s *Conflict_clauseContext) ON_() antlr.TerminalNode { + return s.GetToken(SQLiteParserON_, 0) +} + +func (s *Conflict_clauseContext) CONFLICT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCONFLICT_, 0) +} + +func (s *Conflict_clauseContext) ROLLBACK_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROLLBACK_, 0) +} + +func (s *Conflict_clauseContext) ABORT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserABORT_, 0) +} + +func (s *Conflict_clauseContext) FAIL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFAIL_, 0) +} + +func (s *Conflict_clauseContext) IGNORE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIGNORE_, 0) +} + +func (s *Conflict_clauseContext) REPLACE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserREPLACE_, 0) +} + +func (s *Conflict_clauseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Conflict_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Conflict_clauseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterConflict_clause(s) + } +} + +func (s *Conflict_clauseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitConflict_clause(s) + } +} + +func (p *SQLiteParser) Conflict_clause() (localctx IConflict_clauseContext) { + this := p + _ = this + + localctx = NewConflict_clauseContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 40, SQLiteParserRULE_conflict_clause) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(631) + p.Match(SQLiteParserON_) + } + { + p.SetState(632) + p.Match(SQLiteParserCONFLICT_) + } + { + p.SetState(633) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserABORT_ || ((int64((_la-72)) & ^0x3f) == 0 && ((int64(1)<<(_la-72))&19140298416325121) != 0)) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + return localctx +} + +// ICreate_trigger_stmtContext is an interface to support dynamic dispatch. +type ICreate_trigger_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
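+ //
+ // The getter signatures below follow SQLite's CREATE TRIGGER grammar:
+ //   CREATE [TEMP|TEMPORARY] TRIGGER [IF NOT EXISTS] [schema.]trigger_name
+ //   [BEFORE | AFTER | INSTEAD OF] (DELETE | INSERT | UPDATE [OF column, ...])
+ //   ON table_name [FOR EACH ROW] [WHEN expr] BEGIN stmt ; ... END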
+ GetParser() antlr.Parser + + // Getter signatures + CREATE_() antlr.TerminalNode + TRIGGER_() antlr.TerminalNode + Trigger_name() ITrigger_nameContext + ON_() antlr.TerminalNode + Table_name() ITable_nameContext + BEGIN_() antlr.TerminalNode + END_() antlr.TerminalNode + DELETE_() antlr.TerminalNode + INSERT_() antlr.TerminalNode + UPDATE_() antlr.TerminalNode + IF_() antlr.TerminalNode + NOT_() antlr.TerminalNode + EXISTS_() antlr.TerminalNode + Schema_name() ISchema_nameContext + DOT() antlr.TerminalNode + BEFORE_() antlr.TerminalNode + AFTER_() antlr.TerminalNode + INSTEAD_() antlr.TerminalNode + AllOF_() []antlr.TerminalNode + OF_(i int) antlr.TerminalNode + FOR_() antlr.TerminalNode + EACH_() antlr.TerminalNode + ROW_() antlr.TerminalNode + WHEN_() antlr.TerminalNode + Expr() IExprContext + AllSCOL() []antlr.TerminalNode + SCOL(i int) antlr.TerminalNode + TEMP_() antlr.TerminalNode + TEMPORARY_() antlr.TerminalNode + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + AllUpdate_stmt() []IUpdate_stmtContext + Update_stmt(i int) IUpdate_stmtContext + AllInsert_stmt() []IInsert_stmtContext + Insert_stmt(i int) IInsert_stmtContext + AllDelete_stmt() []IDelete_stmtContext + Delete_stmt(i int) IDelete_stmtContext + AllSelect_stmt() []ISelect_stmtContext + Select_stmt(i int) ISelect_stmtContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsCreate_trigger_stmtContext differentiates from other interfaces. + IsCreate_trigger_stmtContext() +} + +type Create_trigger_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyCreate_trigger_stmtContext() *Create_trigger_stmtContext { + var p = new(Create_trigger_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_create_trigger_stmt + return p +} + +func (*Create_trigger_stmtContext) IsCreate_trigger_stmtContext() {} + +func NewCreate_trigger_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Create_trigger_stmtContext { + var p = new(Create_trigger_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_create_trigger_stmt + + return p +} + +func (s *Create_trigger_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Create_trigger_stmtContext) CREATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCREATE_, 0) +} + +func (s *Create_trigger_stmtContext) TRIGGER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTRIGGER_, 0) +} + +func (s *Create_trigger_stmtContext) Trigger_name() ITrigger_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITrigger_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITrigger_nameContext) +} + +func (s *Create_trigger_stmtContext) ON_() antlr.TerminalNode { + return s.GetToken(SQLiteParserON_, 0) +} + +func (s *Create_trigger_stmtContext) Table_name() ITable_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_nameContext) +} + +func (s *Create_trigger_stmtContext) BEGIN_() antlr.TerminalNode { + return s.GetToken(SQLiteParserBEGIN_, 0) +} + +func (s *Create_trigger_stmtContext) END_() antlr.TerminalNode { + return 
s.GetToken(SQLiteParserEND_, 0) +} + +func (s *Create_trigger_stmtContext) DELETE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDELETE_, 0) +} + +func (s *Create_trigger_stmtContext) INSERT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserINSERT_, 0) +} + +func (s *Create_trigger_stmtContext) UPDATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUPDATE_, 0) +} + +func (s *Create_trigger_stmtContext) IF_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIF_, 0) +} + +func (s *Create_trigger_stmtContext) NOT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_, 0) +} + +func (s *Create_trigger_stmtContext) EXISTS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserEXISTS_, 0) +} + +func (s *Create_trigger_stmtContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *Create_trigger_stmtContext) DOT() antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, 0) +} + +func (s *Create_trigger_stmtContext) BEFORE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserBEFORE_, 0) +} + +func (s *Create_trigger_stmtContext) AFTER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAFTER_, 0) +} + +func (s *Create_trigger_stmtContext) INSTEAD_() antlr.TerminalNode { + return s.GetToken(SQLiteParserINSTEAD_, 0) +} + +func (s *Create_trigger_stmtContext) AllOF_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserOF_) +} + +func (s *Create_trigger_stmtContext) OF_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserOF_, i) +} + +func (s *Create_trigger_stmtContext) FOR_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFOR_, 0) +} + +func (s *Create_trigger_stmtContext) EACH_() antlr.TerminalNode { + return s.GetToken(SQLiteParserEACH_, 0) +} + +func (s *Create_trigger_stmtContext) ROW_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROW_, 0) +} + +func (s *Create_trigger_stmtContext) WHEN_() antlr.TerminalNode { + return s.GetToken(SQLiteParserWHEN_, 0) +} + +func (s *Create_trigger_stmtContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Create_trigger_stmtContext) AllSCOL() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserSCOL) +} + +func (s *Create_trigger_stmtContext) SCOL(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserSCOL, i) +} + +func (s *Create_trigger_stmtContext) TEMP_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTEMP_, 0) +} + +func (s *Create_trigger_stmtContext) TEMPORARY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTEMPORARY_, 0) +} + +func (s *Create_trigger_stmtContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Create_trigger_stmtContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j 
== i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Create_trigger_stmtContext) AllUpdate_stmt() []IUpdate_stmtContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IUpdate_stmtContext); ok { + len++ + } + } + + tst := make([]IUpdate_stmtContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IUpdate_stmtContext); ok { + tst[i] = t.(IUpdate_stmtContext) + i++ + } + } + + return tst +} + +func (s *Create_trigger_stmtContext) Update_stmt(i int) IUpdate_stmtContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IUpdate_stmtContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IUpdate_stmtContext) +} + +func (s *Create_trigger_stmtContext) AllInsert_stmt() []IInsert_stmtContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IInsert_stmtContext); ok { + len++ + } + } + + tst := make([]IInsert_stmtContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IInsert_stmtContext); ok { + tst[i] = t.(IInsert_stmtContext) + i++ + } + } + + return tst +} + +func (s *Create_trigger_stmtContext) Insert_stmt(i int) IInsert_stmtContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IInsert_stmtContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IInsert_stmtContext) +} + +func (s *Create_trigger_stmtContext) AllDelete_stmt() []IDelete_stmtContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IDelete_stmtContext); ok { + len++ + } + } + + tst := make([]IDelete_stmtContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IDelete_stmtContext); ok { + tst[i] = t.(IDelete_stmtContext) + i++ + } + } + + return tst +} + +func (s *Create_trigger_stmtContext) Delete_stmt(i int) IDelete_stmtContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IDelete_stmtContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IDelete_stmtContext) +} + +func (s *Create_trigger_stmtContext) AllSelect_stmt() []ISelect_stmtContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ISelect_stmtContext); ok { + len++ + } + } + + tst := make([]ISelect_stmtContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ISelect_stmtContext); ok { + tst[i] = t.(ISelect_stmtContext) + i++ + } + } + + return tst +} + +func (s *Create_trigger_stmtContext) Select_stmt(i int) ISelect_stmtContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_stmtContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ISelect_stmtContext) +} + +func (s *Create_trigger_stmtContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Create_trigger_stmtContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Create_trigger_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Create_trigger_stmtContext) 
ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Create_trigger_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterCreate_trigger_stmt(s) + } +} + +func (s *Create_trigger_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitCreate_trigger_stmt(s) + } +} + +func (p *SQLiteParser) Create_trigger_stmt() (localctx ICreate_trigger_stmtContext) { + this := p + _ = this + + localctx = NewCreate_trigger_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 42, SQLiteParserRULE_create_trigger_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(635) + p.Match(SQLiteParserCREATE_) + } + p.SetState(637) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_ { + { + p.SetState(636) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + } + { + p.SetState(639) + p.Match(SQLiteParserTRIGGER_) + } + p.SetState(643) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 71, p.GetParserRuleContext()) == 1 { + { + p.SetState(640) + p.Match(SQLiteParserIF_) + } + { + p.SetState(641) + p.Match(SQLiteParserNOT_) + } + { + p.SetState(642) + p.Match(SQLiteParserEXISTS_) + } + + } + p.SetState(648) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 72, p.GetParserRuleContext()) == 1 { + { + p.SetState(645) + p.Schema_name() + } + { + p.SetState(646) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(650) + p.Trigger_name() + } + p.SetState(655) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserBEFORE_: + { + p.SetState(651) + p.Match(SQLiteParserBEFORE_) + } + + case SQLiteParserAFTER_: + { + p.SetState(652) + p.Match(SQLiteParserAFTER_) + } + + case SQLiteParserINSTEAD_: + { + p.SetState(653) + p.Match(SQLiteParserINSTEAD_) + } + { + p.SetState(654) + p.Match(SQLiteParserOF_) + } + + case SQLiteParserDELETE_, SQLiteParserINSERT_, SQLiteParserUPDATE_: + + default: + } + p.SetState(671) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserDELETE_: + { + p.SetState(657) + p.Match(SQLiteParserDELETE_) + } + + case SQLiteParserINSERT_: + { + p.SetState(658) + p.Match(SQLiteParserINSERT_) + } + + case SQLiteParserUPDATE_: + { + p.SetState(659) + p.Match(SQLiteParserUPDATE_) + } + p.SetState(669) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserOF_ { + { + p.SetState(660) + p.Match(SQLiteParserOF_) + } + { + p.SetState(661) + p.Column_name() + } + p.SetState(666) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(662) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(663) + p.Column_name() + } + + p.SetState(668) + p.GetErrorHandler().Sync(p) + _la = 
p.GetTokenStream().LA(1) + } + + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + { + p.SetState(673) + p.Match(SQLiteParserON_) + } + { + p.SetState(674) + p.Table_name() + } + p.SetState(678) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserFOR_ { + { + p.SetState(675) + p.Match(SQLiteParserFOR_) + } + { + p.SetState(676) + p.Match(SQLiteParserEACH_) + } + { + p.SetState(677) + p.Match(SQLiteParserROW_) + } + + } + p.SetState(682) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWHEN_ { + { + p.SetState(680) + p.Match(SQLiteParserWHEN_) + } + { + p.SetState(681) + p.expr(0) + } + + } + { + p.SetState(684) + p.Match(SQLiteParserBEGIN_) + } + p.SetState(693) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for ok := true; ok; ok = _la == SQLiteParserDELETE_ || ((int64((_la-88)) & ^0x3f) == 0 && ((int64(1)<<(_la-88))&2386912217732743169) != 0) { + p.SetState(689) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 79, p.GetParserRuleContext()) { + case 1: + { + p.SetState(685) + p.Update_stmt() + } + + case 2: + { + p.SetState(686) + p.Insert_stmt() + } + + case 3: + { + p.SetState(687) + p.Delete_stmt() + } + + case 4: + { + p.SetState(688) + p.Select_stmt() + } + + } + { + p.SetState(691) + p.Match(SQLiteParserSCOL) + } + + p.SetState(695) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(697) + p.Match(SQLiteParserEND_) + } + + return localctx +} + +// ICreate_view_stmtContext is an interface to support dynamic dispatch. +type ICreate_view_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + CREATE_() antlr.TerminalNode + VIEW_() antlr.TerminalNode + View_name() IView_nameContext + AS_() antlr.TerminalNode + Select_stmt() ISelect_stmtContext + IF_() antlr.TerminalNode + NOT_() antlr.TerminalNode + EXISTS_() antlr.TerminalNode + Schema_name() ISchema_nameContext + DOT() antlr.TerminalNode + OPEN_PAR() antlr.TerminalNode + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + CLOSE_PAR() antlr.TerminalNode + TEMP_() antlr.TerminalNode + TEMPORARY_() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsCreate_view_stmtContext differentiates from other interfaces. 
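+ //
+ // create_view_stmt corresponds to:
+ //   CREATE [TEMP|TEMPORARY] VIEW [IF NOT EXISTS] [schema.]view_name
+ //   [(column_name, ...)] AS select_stmt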
+ IsCreate_view_stmtContext() +} + +type Create_view_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyCreate_view_stmtContext() *Create_view_stmtContext { + var p = new(Create_view_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_create_view_stmt + return p +} + +func (*Create_view_stmtContext) IsCreate_view_stmtContext() {} + +func NewCreate_view_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Create_view_stmtContext { + var p = new(Create_view_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_create_view_stmt + + return p +} + +func (s *Create_view_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Create_view_stmtContext) CREATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCREATE_, 0) +} + +func (s *Create_view_stmtContext) VIEW_() antlr.TerminalNode { + return s.GetToken(SQLiteParserVIEW_, 0) +} + +func (s *Create_view_stmtContext) View_name() IView_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IView_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IView_nameContext) +} + +func (s *Create_view_stmtContext) AS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, 0) +} + +func (s *Create_view_stmtContext) Select_stmt() ISelect_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISelect_stmtContext) +} + +func (s *Create_view_stmtContext) IF_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIF_, 0) +} + +func (s *Create_view_stmtContext) NOT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_, 0) +} + +func (s *Create_view_stmtContext) EXISTS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserEXISTS_, 0) +} + +func (s *Create_view_stmtContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *Create_view_stmtContext) DOT() antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, 0) +} + +func (s *Create_view_stmtContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Create_view_stmtContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Create_view_stmtContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Create_view_stmtContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s 
*Create_view_stmtContext) TEMP_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTEMP_, 0) +} + +func (s *Create_view_stmtContext) TEMPORARY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTEMPORARY_, 0) +} + +func (s *Create_view_stmtContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Create_view_stmtContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Create_view_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Create_view_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Create_view_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterCreate_view_stmt(s) + } +} + +func (s *Create_view_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitCreate_view_stmt(s) + } +} + +func (p *SQLiteParser) Create_view_stmt() (localctx ICreate_view_stmtContext) { + this := p + _ = this + + localctx = NewCreate_view_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 44, SQLiteParserRULE_create_view_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(699) + p.Match(SQLiteParserCREATE_) + } + p.SetState(701) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_ { + { + p.SetState(700) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserTEMP_ || _la == SQLiteParserTEMPORARY_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + } + { + p.SetState(703) + p.Match(SQLiteParserVIEW_) + } + p.SetState(707) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 82, p.GetParserRuleContext()) == 1 { + { + p.SetState(704) + p.Match(SQLiteParserIF_) + } + { + p.SetState(705) + p.Match(SQLiteParserNOT_) + } + { + p.SetState(706) + p.Match(SQLiteParserEXISTS_) + } + + } + p.SetState(712) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 83, p.GetParserRuleContext()) == 1 { + { + p.SetState(709) + p.Schema_name() + } + { + p.SetState(710) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(714) + p.View_name() + } + p.SetState(726) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserOPEN_PAR { + { + p.SetState(715) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(716) + p.Column_name() + } + p.SetState(721) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(717) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(718) + p.Column_name() + } + + p.SetState(723) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(724) + p.Match(SQLiteParserCLOSE_PAR) + } + + } + { + p.SetState(728) + p.Match(SQLiteParserAS_) + } + { + p.SetState(729) + p.Select_stmt() + } + + return localctx +} + +// ICreate_virtual_table_stmtContext is an interface to support dynamic 
dispatch. +type ICreate_virtual_table_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + CREATE_() antlr.TerminalNode + VIRTUAL_() antlr.TerminalNode + TABLE_() antlr.TerminalNode + Table_name() ITable_nameContext + USING_() antlr.TerminalNode + Module_name() IModule_nameContext + IF_() antlr.TerminalNode + NOT_() antlr.TerminalNode + EXISTS_() antlr.TerminalNode + Schema_name() ISchema_nameContext + DOT() antlr.TerminalNode + OPEN_PAR() antlr.TerminalNode + AllModule_argument() []IModule_argumentContext + Module_argument(i int) IModule_argumentContext + CLOSE_PAR() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsCreate_virtual_table_stmtContext differentiates from other interfaces. + IsCreate_virtual_table_stmtContext() +} + +type Create_virtual_table_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyCreate_virtual_table_stmtContext() *Create_virtual_table_stmtContext { + var p = new(Create_virtual_table_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_create_virtual_table_stmt + return p +} + +func (*Create_virtual_table_stmtContext) IsCreate_virtual_table_stmtContext() {} + +func NewCreate_virtual_table_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Create_virtual_table_stmtContext { + var p = new(Create_virtual_table_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_create_virtual_table_stmt + + return p +} + +func (s *Create_virtual_table_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Create_virtual_table_stmtContext) CREATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCREATE_, 0) +} + +func (s *Create_virtual_table_stmtContext) VIRTUAL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserVIRTUAL_, 0) +} + +func (s *Create_virtual_table_stmtContext) TABLE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTABLE_, 0) +} + +func (s *Create_virtual_table_stmtContext) Table_name() ITable_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_nameContext) +} + +func (s *Create_virtual_table_stmtContext) USING_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUSING_, 0) +} + +func (s *Create_virtual_table_stmtContext) Module_name() IModule_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IModule_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IModule_nameContext) +} + +func (s *Create_virtual_table_stmtContext) IF_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIF_, 0) +} + +func (s *Create_virtual_table_stmtContext) NOT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_, 0) +} + +func (s *Create_virtual_table_stmtContext) EXISTS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserEXISTS_, 0) +} + +func (s *Create_virtual_table_stmtContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil 
+ } + + return t.(ISchema_nameContext) +} + +func (s *Create_virtual_table_stmtContext) DOT() antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, 0) +} + +func (s *Create_virtual_table_stmtContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Create_virtual_table_stmtContext) AllModule_argument() []IModule_argumentContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IModule_argumentContext); ok { + len++ + } + } + + tst := make([]IModule_argumentContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IModule_argumentContext); ok { + tst[i] = t.(IModule_argumentContext) + i++ + } + } + + return tst +} + +func (s *Create_virtual_table_stmtContext) Module_argument(i int) IModule_argumentContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IModule_argumentContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IModule_argumentContext) +} + +func (s *Create_virtual_table_stmtContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Create_virtual_table_stmtContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Create_virtual_table_stmtContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Create_virtual_table_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Create_virtual_table_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Create_virtual_table_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterCreate_virtual_table_stmt(s) + } +} + +func (s *Create_virtual_table_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitCreate_virtual_table_stmt(s) + } +} + +func (p *SQLiteParser) Create_virtual_table_stmt() (localctx ICreate_virtual_table_stmtContext) { + this := p + _ = this + + localctx = NewCreate_virtual_table_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 46, SQLiteParserRULE_create_virtual_table_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(731) + p.Match(SQLiteParserCREATE_) + } + { + p.SetState(732) + p.Match(SQLiteParserVIRTUAL_) + } + { + p.SetState(733) + p.Match(SQLiteParserTABLE_) + } + p.SetState(737) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 86, p.GetParserRuleContext()) == 1 { + { + p.SetState(734) + p.Match(SQLiteParserIF_) + } + { + p.SetState(735) + p.Match(SQLiteParserNOT_) + } + { + p.SetState(736) + p.Match(SQLiteParserEXISTS_) + } + + } + p.SetState(742) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 87, p.GetParserRuleContext()) == 1 { + { + p.SetState(739) + p.Schema_name() + } + { + p.SetState(740) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(744) + p.Table_name() + } + 
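+ // USING module_name, optionally followed by (module_argument, ...).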
{ + p.SetState(745) + p.Match(SQLiteParserUSING_) + } + { + p.SetState(746) + p.Module_name() + } + p.SetState(758) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserOPEN_PAR { + { + p.SetState(747) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(748) + p.Module_argument() + } + p.SetState(753) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(749) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(750) + p.Module_argument() + } + + p.SetState(755) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(756) + p.Match(SQLiteParserCLOSE_PAR) + } + + } + + return localctx +} + +// IWith_clauseContext is an interface to support dynamic dispatch. +type IWith_clauseContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + WITH_() antlr.TerminalNode + AllCte_table_name() []ICte_table_nameContext + Cte_table_name(i int) ICte_table_nameContext + AllAS_() []antlr.TerminalNode + AS_(i int) antlr.TerminalNode + AllOPEN_PAR() []antlr.TerminalNode + OPEN_PAR(i int) antlr.TerminalNode + AllSelect_stmt() []ISelect_stmtContext + Select_stmt(i int) ISelect_stmtContext + AllCLOSE_PAR() []antlr.TerminalNode + CLOSE_PAR(i int) antlr.TerminalNode + RECURSIVE_() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsWith_clauseContext differentiates from other interfaces. + IsWith_clauseContext() +} + +type With_clauseContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyWith_clauseContext() *With_clauseContext { + var p = new(With_clauseContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_with_clause + return p +} + +func (*With_clauseContext) IsWith_clauseContext() {} + +func NewWith_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *With_clauseContext { + var p = new(With_clauseContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_with_clause + + return p +} + +func (s *With_clauseContext) GetParser() antlr.Parser { return s.parser } + +func (s *With_clauseContext) WITH_() antlr.TerminalNode { + return s.GetToken(SQLiteParserWITH_, 0) +} + +func (s *With_clauseContext) AllCte_table_name() []ICte_table_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ICte_table_nameContext); ok { + len++ + } + } + + tst := make([]ICte_table_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ICte_table_nameContext); ok { + tst[i] = t.(ICte_table_nameContext) + i++ + } + } + + return tst +} + +func (s *With_clauseContext) Cte_table_name(i int) ICte_table_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICte_table_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ICte_table_nameContext) +} + +func (s *With_clauseContext) AllAS_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserAS_) +} + +func (s *With_clauseContext) AS_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, i) +} + +func (s *With_clauseContext) AllOPEN_PAR() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserOPEN_PAR) +} + +func (s 
*With_clauseContext) OPEN_PAR(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, i) +} + +func (s *With_clauseContext) AllSelect_stmt() []ISelect_stmtContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ISelect_stmtContext); ok { + len++ + } + } + + tst := make([]ISelect_stmtContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ISelect_stmtContext); ok { + tst[i] = t.(ISelect_stmtContext) + i++ + } + } + + return tst +} + +func (s *With_clauseContext) Select_stmt(i int) ISelect_stmtContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_stmtContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ISelect_stmtContext) +} + +func (s *With_clauseContext) AllCLOSE_PAR() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCLOSE_PAR) +} + +func (s *With_clauseContext) CLOSE_PAR(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, i) +} + +func (s *With_clauseContext) RECURSIVE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserRECURSIVE_, 0) +} + +func (s *With_clauseContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *With_clauseContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *With_clauseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *With_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *With_clauseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterWith_clause(s) + } +} + +func (s *With_clauseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitWith_clause(s) + } +} + +func (p *SQLiteParser) With_clause() (localctx IWith_clauseContext) { + this := p + _ = this + + localctx = NewWith_clauseContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 48, SQLiteParserRULE_with_clause) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(760) + p.Match(SQLiteParserWITH_) + } + p.SetState(762) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 90, p.GetParserRuleContext()) == 1 { + { + p.SetState(761) + p.Match(SQLiteParserRECURSIVE_) + } + + } + { + p.SetState(764) + p.Cte_table_name() + } + { + p.SetState(765) + p.Match(SQLiteParserAS_) + } + { + p.SetState(766) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(767) + p.Select_stmt() + } + { + p.SetState(768) + p.Match(SQLiteParserCLOSE_PAR) + } + p.SetState(778) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(769) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(770) + p.Cte_table_name() + } + { + p.SetState(771) + p.Match(SQLiteParserAS_) + } + { + p.SetState(772) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(773) + p.Select_stmt() + } + { + p.SetState(774) + p.Match(SQLiteParserCLOSE_PAR) + } + + 
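+ // One more ", cte_table_name AS ( select_stmt )" consumed; re-sync
+ // and look for another comma.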
p.SetState(780) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + return localctx +} + +// ICte_table_nameContext is an interface to support dynamic dispatch. +type ICte_table_nameContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Table_name() ITable_nameContext + OPEN_PAR() antlr.TerminalNode + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + CLOSE_PAR() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsCte_table_nameContext differentiates from other interfaces. + IsCte_table_nameContext() +} + +type Cte_table_nameContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyCte_table_nameContext() *Cte_table_nameContext { + var p = new(Cte_table_nameContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_cte_table_name + return p +} + +func (*Cte_table_nameContext) IsCte_table_nameContext() {} + +func NewCte_table_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Cte_table_nameContext { + var p = new(Cte_table_nameContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_cte_table_name + + return p +} + +func (s *Cte_table_nameContext) GetParser() antlr.Parser { return s.parser } + +func (s *Cte_table_nameContext) Table_name() ITable_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_nameContext) +} + +func (s *Cte_table_nameContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Cte_table_nameContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Cte_table_nameContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Cte_table_nameContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Cte_table_nameContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Cte_table_nameContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Cte_table_nameContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Cte_table_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Cte_table_nameContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterCte_table_name(s) + } +} + +func (s *Cte_table_nameContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := 
listener.(SQLiteParserListener); ok { + listenerT.ExitCte_table_name(s) + } +} + +func (p *SQLiteParser) Cte_table_name() (localctx ICte_table_nameContext) { + this := p + _ = this + + localctx = NewCte_table_nameContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 50, SQLiteParserRULE_cte_table_name) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(781) + p.Table_name() + } + p.SetState(793) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserOPEN_PAR { + { + p.SetState(782) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(783) + p.Column_name() + } + p.SetState(788) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(784) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(785) + p.Column_name() + } + + p.SetState(790) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(791) + p.Match(SQLiteParserCLOSE_PAR) + } + + } + + return localctx +} + +// IRecursive_cteContext is an interface to support dynamic dispatch. +type IRecursive_cteContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Cte_table_name() ICte_table_nameContext + AS_() antlr.TerminalNode + OPEN_PAR() antlr.TerminalNode + Initial_select() IInitial_selectContext + UNION_() antlr.TerminalNode + Recursive_select() IRecursive_selectContext + CLOSE_PAR() antlr.TerminalNode + ALL_() antlr.TerminalNode + + // IsRecursive_cteContext differentiates from other interfaces. 
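+ //
+ // recursive_cte matches:
+ //   cte_table_name AS ( initial_select UNION [ALL] recursive_select )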
+ IsRecursive_cteContext() +} + +type Recursive_cteContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyRecursive_cteContext() *Recursive_cteContext { + var p = new(Recursive_cteContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_recursive_cte + return p +} + +func (*Recursive_cteContext) IsRecursive_cteContext() {} + +func NewRecursive_cteContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Recursive_cteContext { + var p = new(Recursive_cteContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_recursive_cte + + return p +} + +func (s *Recursive_cteContext) GetParser() antlr.Parser { return s.parser } + +func (s *Recursive_cteContext) Cte_table_name() ICte_table_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICte_table_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ICte_table_nameContext) +} + +func (s *Recursive_cteContext) AS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, 0) +} + +func (s *Recursive_cteContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Recursive_cteContext) Initial_select() IInitial_selectContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IInitial_selectContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IInitial_selectContext) +} + +func (s *Recursive_cteContext) UNION_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUNION_, 0) +} + +func (s *Recursive_cteContext) Recursive_select() IRecursive_selectContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IRecursive_selectContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IRecursive_selectContext) +} + +func (s *Recursive_cteContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Recursive_cteContext) ALL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserALL_, 0) +} + +func (s *Recursive_cteContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Recursive_cteContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Recursive_cteContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterRecursive_cte(s) + } +} + +func (s *Recursive_cteContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitRecursive_cte(s) + } +} + +func (p *SQLiteParser) Recursive_cte() (localctx IRecursive_cteContext) { + this := p + _ = this + + localctx = NewRecursive_cteContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 52, SQLiteParserRULE_recursive_cte) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(795) + p.Cte_table_name() + } + { + 
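+ // Illustrative input for this rule:
+ //   WITH RECURSIVE cnt(x) AS (SELECT 1 UNION ALL SELECT x+1 FROM cnt)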
p.SetState(796) + p.Match(SQLiteParserAS_) + } + { + p.SetState(797) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(798) + p.Initial_select() + } + { + p.SetState(799) + p.Match(SQLiteParserUNION_) + } + p.SetState(801) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserALL_ { + { + p.SetState(800) + p.Match(SQLiteParserALL_) + } + + } + { + p.SetState(803) + p.Recursive_select() + } + { + p.SetState(804) + p.Match(SQLiteParserCLOSE_PAR) + } + + return localctx +} + +// ICommon_table_expressionContext is an interface to support dynamic dispatch. +type ICommon_table_expressionContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Table_name() ITable_nameContext + AS_() antlr.TerminalNode + AllOPEN_PAR() []antlr.TerminalNode + OPEN_PAR(i int) antlr.TerminalNode + Select_stmt() ISelect_stmtContext + AllCLOSE_PAR() []antlr.TerminalNode + CLOSE_PAR(i int) antlr.TerminalNode + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsCommon_table_expressionContext differentiates from other interfaces. + IsCommon_table_expressionContext() +} + +type Common_table_expressionContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyCommon_table_expressionContext() *Common_table_expressionContext { + var p = new(Common_table_expressionContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_common_table_expression + return p +} + +func (*Common_table_expressionContext) IsCommon_table_expressionContext() {} + +func NewCommon_table_expressionContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Common_table_expressionContext { + var p = new(Common_table_expressionContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_common_table_expression + + return p +} + +func (s *Common_table_expressionContext) GetParser() antlr.Parser { return s.parser } + +func (s *Common_table_expressionContext) Table_name() ITable_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_nameContext) +} + +func (s *Common_table_expressionContext) AS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, 0) +} + +func (s *Common_table_expressionContext) AllOPEN_PAR() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserOPEN_PAR) +} + +func (s *Common_table_expressionContext) OPEN_PAR(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, i) +} + +func (s *Common_table_expressionContext) Select_stmt() ISelect_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISelect_stmtContext) +} + +func (s *Common_table_expressionContext) AllCLOSE_PAR() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCLOSE_PAR) +} + +func (s *Common_table_expressionContext) CLOSE_PAR(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, i) +} + +func (s *Common_table_expressionContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + 
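+ // Generated two-pass pattern: count the matching children first,
+ // then collect them into a slice of exactly that size.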
len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Common_table_expressionContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Common_table_expressionContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Common_table_expressionContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Common_table_expressionContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Common_table_expressionContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Common_table_expressionContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterCommon_table_expression(s) + } +} + +func (s *Common_table_expressionContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitCommon_table_expression(s) + } +} + +func (p *SQLiteParser) Common_table_expression() (localctx ICommon_table_expressionContext) { + this := p + _ = this + + localctx = NewCommon_table_expressionContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 54, SQLiteParserRULE_common_table_expression) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(806) + p.Table_name() + } + p.SetState(818) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserOPEN_PAR { + { + p.SetState(807) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(808) + p.Column_name() + } + p.SetState(813) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(809) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(810) + p.Column_name() + } + + p.SetState(815) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(816) + p.Match(SQLiteParserCLOSE_PAR) + } + + } + { + p.SetState(820) + p.Match(SQLiteParserAS_) + } + { + p.SetState(821) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(822) + p.Select_stmt() + } + { + p.SetState(823) + p.Match(SQLiteParserCLOSE_PAR) + } + + return localctx +} + +// IDelete_stmtContext is an interface to support dynamic dispatch. +type IDelete_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
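+ //
+ // delete_stmt corresponds to:
+ //   [with_clause] DELETE FROM qualified_table_name [WHERE expr] [returning_clause]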
+ GetParser() antlr.Parser + + // Getter signatures + DELETE_() antlr.TerminalNode + FROM_() antlr.TerminalNode + Qualified_table_name() IQualified_table_nameContext + With_clause() IWith_clauseContext + WHERE_() antlr.TerminalNode + Expr() IExprContext + Returning_clause() IReturning_clauseContext + + // IsDelete_stmtContext differentiates from other interfaces. + IsDelete_stmtContext() +} + +type Delete_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyDelete_stmtContext() *Delete_stmtContext { + var p = new(Delete_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_delete_stmt + return p +} + +func (*Delete_stmtContext) IsDelete_stmtContext() {} + +func NewDelete_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Delete_stmtContext { + var p = new(Delete_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_delete_stmt + + return p +} + +func (s *Delete_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Delete_stmtContext) DELETE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDELETE_, 0) +} + +func (s *Delete_stmtContext) FROM_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFROM_, 0) +} + +func (s *Delete_stmtContext) Qualified_table_name() IQualified_table_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IQualified_table_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IQualified_table_nameContext) +} + +func (s *Delete_stmtContext) With_clause() IWith_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IWith_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IWith_clauseContext) +} + +func (s *Delete_stmtContext) WHERE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserWHERE_, 0) +} + +func (s *Delete_stmtContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Delete_stmtContext) Returning_clause() IReturning_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IReturning_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IReturning_clauseContext) +} + +func (s *Delete_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Delete_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Delete_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterDelete_stmt(s) + } +} + +func (s *Delete_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitDelete_stmt(s) + } +} + +func (p *SQLiteParser) Delete_stmt() (localctx IDelete_stmtContext) { + this := p + _ = this + + localctx = NewDelete_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 56, SQLiteParserRULE_delete_stmt) + var _la int + + 
defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(826) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWITH_ { + { + p.SetState(825) + p.With_clause() + } + + } + { + p.SetState(828) + p.Match(SQLiteParserDELETE_) + } + { + p.SetState(829) + p.Match(SQLiteParserFROM_) + } + { + p.SetState(830) + p.Qualified_table_name() + } + p.SetState(833) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWHERE_ { + { + p.SetState(831) + p.Match(SQLiteParserWHERE_) + } + { + p.SetState(832) + p.expr(0) + } + + } + p.SetState(836) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserRETURNING_ { + { + p.SetState(835) + p.Returning_clause() + } + + } + + return localctx +} + +// IDelete_stmt_limitedContext is an interface to support dynamic dispatch. +type IDelete_stmt_limitedContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + DELETE_() antlr.TerminalNode + FROM_() antlr.TerminalNode + Qualified_table_name() IQualified_table_nameContext + With_clause() IWith_clauseContext + WHERE_() antlr.TerminalNode + Expr() IExprContext + Returning_clause() IReturning_clauseContext + Limit_stmt() ILimit_stmtContext + Order_by_stmt() IOrder_by_stmtContext + + // IsDelete_stmt_limitedContext differentiates from other interfaces. + IsDelete_stmt_limitedContext() +} + +type Delete_stmt_limitedContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyDelete_stmt_limitedContext() *Delete_stmt_limitedContext { + var p = new(Delete_stmt_limitedContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_delete_stmt_limited + return p +} + +func (*Delete_stmt_limitedContext) IsDelete_stmt_limitedContext() {} + +func NewDelete_stmt_limitedContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Delete_stmt_limitedContext { + var p = new(Delete_stmt_limitedContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_delete_stmt_limited + + return p +} + +func (s *Delete_stmt_limitedContext) GetParser() antlr.Parser { return s.parser } + +func (s *Delete_stmt_limitedContext) DELETE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDELETE_, 0) +} + +func (s *Delete_stmt_limitedContext) FROM_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFROM_, 0) +} + +func (s *Delete_stmt_limitedContext) Qualified_table_name() IQualified_table_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IQualified_table_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IQualified_table_nameContext) +} + +func (s *Delete_stmt_limitedContext) With_clause() IWith_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IWith_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IWith_clauseContext) +} + +func (s *Delete_stmt_limitedContext) WHERE_() antlr.TerminalNode { + 
return s.GetToken(SQLiteParserWHERE_, 0) +} + +func (s *Delete_stmt_limitedContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Delete_stmt_limitedContext) Returning_clause() IReturning_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IReturning_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IReturning_clauseContext) +} + +func (s *Delete_stmt_limitedContext) Limit_stmt() ILimit_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ILimit_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ILimit_stmtContext) +} + +func (s *Delete_stmt_limitedContext) Order_by_stmt() IOrder_by_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOrder_by_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IOrder_by_stmtContext) +} + +func (s *Delete_stmt_limitedContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Delete_stmt_limitedContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Delete_stmt_limitedContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterDelete_stmt_limited(s) + } +} + +func (s *Delete_stmt_limitedContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitDelete_stmt_limited(s) + } +} + +func (p *SQLiteParser) Delete_stmt_limited() (localctx IDelete_stmt_limitedContext) { + this := p + _ = this + + localctx = NewDelete_stmt_limitedContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 58, SQLiteParserRULE_delete_stmt_limited) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(839) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWITH_ { + { + p.SetState(838) + p.With_clause() + } + + } + { + p.SetState(841) + p.Match(SQLiteParserDELETE_) + } + { + p.SetState(842) + p.Match(SQLiteParserFROM_) + } + { + p.SetState(843) + p.Qualified_table_name() + } + p.SetState(846) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWHERE_ { + { + p.SetState(844) + p.Match(SQLiteParserWHERE_) + } + { + p.SetState(845) + p.expr(0) + } + + } + p.SetState(849) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserRETURNING_ { + { + p.SetState(848) + p.Returning_clause() + } + + } + p.SetState(855) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserLIMIT_ || _la == SQLiteParserORDER_ { + p.SetState(852) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserORDER_ { + { + p.SetState(851) + p.Order_by_stmt() + } + + } + { + 
p.SetState(854) + p.Limit_stmt() + } + + } + + return localctx +} + +// IDetach_stmtContext is an interface to support dynamic dispatch. +type IDetach_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + DETACH_() antlr.TerminalNode + Schema_name() ISchema_nameContext + DATABASE_() antlr.TerminalNode + + // IsDetach_stmtContext differentiates from other interfaces. + IsDetach_stmtContext() +} + +type Detach_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyDetach_stmtContext() *Detach_stmtContext { + var p = new(Detach_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_detach_stmt + return p +} + +func (*Detach_stmtContext) IsDetach_stmtContext() {} + +func NewDetach_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Detach_stmtContext { + var p = new(Detach_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_detach_stmt + + return p +} + +func (s *Detach_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Detach_stmtContext) DETACH_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDETACH_, 0) +} + +func (s *Detach_stmtContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *Detach_stmtContext) DATABASE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDATABASE_, 0) +} + +func (s *Detach_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Detach_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Detach_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterDetach_stmt(s) + } +} + +func (s *Detach_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitDetach_stmt(s) + } +} + +func (p *SQLiteParser) Detach_stmt() (localctx IDetach_stmtContext) { + this := p + _ = this + + localctx = NewDetach_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 60, SQLiteParserRULE_detach_stmt) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(857) + p.Match(SQLiteParserDETACH_) + } + p.SetState(859) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 105, p.GetParserRuleContext()) == 1 { + { + p.SetState(858) + p.Match(SQLiteParserDATABASE_) + } + + } + { + p.SetState(861) + p.Schema_name() + } + + return localctx +} + +// IDrop_stmtContext is an interface to support dynamic dispatch. +type IDrop_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetObject returns the object token. 
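+ // (Editorial note, not ANTLR-generated: judging from the terminal getters
+ // below and the Drop_stmt parse function, this token appears to record which
+ // keyword was matched after DROP — INDEX, TABLE, TRIGGER, or VIEW — i.e. the
+ // kind of schema object the DROP statement targets.)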
+ GetObject() antlr.Token + + // SetObject sets the object token. + SetObject(antlr.Token) + + // Getter signatures + DROP_() antlr.TerminalNode + Any_name() IAny_nameContext + INDEX_() antlr.TerminalNode + TABLE_() antlr.TerminalNode + TRIGGER_() antlr.TerminalNode + VIEW_() antlr.TerminalNode + IF_() antlr.TerminalNode + EXISTS_() antlr.TerminalNode + Schema_name() ISchema_nameContext + DOT() antlr.TerminalNode + + // IsDrop_stmtContext differentiates from other interfaces. + IsDrop_stmtContext() +} + +type Drop_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser + object antlr.Token +} + +func NewEmptyDrop_stmtContext() *Drop_stmtContext { + var p = new(Drop_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_drop_stmt + return p +} + +func (*Drop_stmtContext) IsDrop_stmtContext() {} + +func NewDrop_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Drop_stmtContext { + var p = new(Drop_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_drop_stmt + + return p +} + +func (s *Drop_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Drop_stmtContext) GetObject() antlr.Token { return s.object } + +func (s *Drop_stmtContext) SetObject(v antlr.Token) { s.object = v } + +func (s *Drop_stmtContext) DROP_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDROP_, 0) +} + +func (s *Drop_stmtContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *Drop_stmtContext) INDEX_() antlr.TerminalNode { + return s.GetToken(SQLiteParserINDEX_, 0) +} + +func (s *Drop_stmtContext) TABLE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTABLE_, 0) +} + +func (s *Drop_stmtContext) TRIGGER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTRIGGER_, 0) +} + +func (s *Drop_stmtContext) VIEW_() antlr.TerminalNode { + return s.GetToken(SQLiteParserVIEW_, 0) +} + +func (s *Drop_stmtContext) IF_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIF_, 0) +} + +func (s *Drop_stmtContext) EXISTS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserEXISTS_, 0) +} + +func (s *Drop_stmtContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *Drop_stmtContext) DOT() antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, 0) +} + +func (s *Drop_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Drop_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Drop_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterDrop_stmt(s) + } +} + +func (s *Drop_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitDrop_stmt(s) + } +} + +func (p *SQLiteParser) Drop_stmt() (localctx IDrop_stmtContext) { + this := p + _ = this + + localctx = NewDrop_stmtContext(p, 
p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 62, SQLiteParserRULE_drop_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(863) + p.Match(SQLiteParserDROP_) + } + { + p.SetState(864) + + var _lt = p.GetTokenStream().LT(1) + + localctx.(*Drop_stmtContext).object = _lt + + _la = p.GetTokenStream().LA(1) + + if !((int64((_la-84)) & ^0x3f) == 0 && ((int64(1)<<(_la-84))&2324138882699886593) != 0) { + var _ri = p.GetErrorHandler().RecoverInline(p) + + localctx.(*Drop_stmtContext).object = _ri + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + p.SetState(867) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 106, p.GetParserRuleContext()) == 1 { + { + p.SetState(865) + p.Match(SQLiteParserIF_) + } + { + p.SetState(866) + p.Match(SQLiteParserEXISTS_) + } + + } + p.SetState(872) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 107, p.GetParserRuleContext()) == 1 { + { + p.SetState(869) + p.Schema_name() + } + { + p.SetState(870) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(874) + p.Any_name() + } + + return localctx +} + +// IExprContext is an interface to support dynamic dispatch. +type IExprContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Literal_value() ILiteral_valueContext + BIND_PARAMETER() antlr.TerminalNode + Column_name() IColumn_nameContext + Table_name() ITable_nameContext + AllDOT() []antlr.TerminalNode + DOT(i int) antlr.TerminalNode + Schema_name() ISchema_nameContext + Unary_operator() IUnary_operatorContext + AllExpr() []IExprContext + Expr(i int) IExprContext + Function_name() IFunction_nameContext + OPEN_PAR() antlr.TerminalNode + CLOSE_PAR() antlr.TerminalNode + STAR() antlr.TerminalNode + Filter_clause() IFilter_clauseContext + Over_clause() IOver_clauseContext + DISTINCT_() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + CAST_() antlr.TerminalNode + AS_() antlr.TerminalNode + Type_name() IType_nameContext + Select_stmt() ISelect_stmtContext + EXISTS_() antlr.TerminalNode + NOT_() antlr.TerminalNode + CASE_() antlr.TerminalNode + END_() antlr.TerminalNode + AllWHEN_() []antlr.TerminalNode + WHEN_(i int) antlr.TerminalNode + AllTHEN_() []antlr.TerminalNode + THEN_(i int) antlr.TerminalNode + ELSE_() antlr.TerminalNode + Raise_function() IRaise_functionContext + PIPE2() antlr.TerminalNode + DIV() antlr.TerminalNode + MOD() antlr.TerminalNode + PLUS() antlr.TerminalNode + MINUS() antlr.TerminalNode + LT2() antlr.TerminalNode + GT2() antlr.TerminalNode + AMP() antlr.TerminalNode + PIPE() antlr.TerminalNode + LT() antlr.TerminalNode + LT_EQ() antlr.TerminalNode + GT() antlr.TerminalNode + GT_EQ() antlr.TerminalNode + ASSIGN() antlr.TerminalNode + EQ() antlr.TerminalNode + NOT_EQ1() antlr.TerminalNode + NOT_EQ2() antlr.TerminalNode + IS_() antlr.TerminalNode + IN_() antlr.TerminalNode + LIKE_() antlr.TerminalNode + GLOB_() antlr.TerminalNode + MATCH_() antlr.TerminalNode + REGEXP_() antlr.TerminalNode + AND_() antlr.TerminalNode + OR_() antlr.TerminalNode + BETWEEN_() antlr.TerminalNode + COLLATE_() antlr.TerminalNode + 
Collation_name() ICollation_nameContext + ESCAPE_() antlr.TerminalNode + ISNULL_() antlr.TerminalNode + NOTNULL_() antlr.TerminalNode + NULL_() antlr.TerminalNode + Table_function_name() ITable_function_nameContext + + // IsExprContext differentiates from other interfaces. + IsExprContext() +} + +type ExprContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyExprContext() *ExprContext { + var p = new(ExprContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_expr + return p +} + +func (*ExprContext) IsExprContext() {} + +func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext { + var p = new(ExprContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_expr + + return p +} + +func (s *ExprContext) GetParser() antlr.Parser { return s.parser } + +func (s *ExprContext) Literal_value() ILiteral_valueContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ILiteral_valueContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ILiteral_valueContext) +} + +func (s *ExprContext) BIND_PARAMETER() antlr.TerminalNode { + return s.GetToken(SQLiteParserBIND_PARAMETER, 0) +} + +func (s *ExprContext) Column_name() IColumn_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *ExprContext) Table_name() ITable_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_nameContext) +} + +func (s *ExprContext) AllDOT() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserDOT) +} + +func (s *ExprContext) DOT(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, i) +} + +func (s *ExprContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *ExprContext) Unary_operator() IUnary_operatorContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IUnary_operatorContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IUnary_operatorContext) +} + +func (s *ExprContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *ExprContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *ExprContext) Function_name() IFunction_nameContext { + var t antlr.RuleContext + for _, ctx := range 
s.GetChildren() { + if _, ok := ctx.(IFunction_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFunction_nameContext) +} + +func (s *ExprContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *ExprContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *ExprContext) STAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserSTAR, 0) +} + +func (s *ExprContext) Filter_clause() IFilter_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFilter_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFilter_clauseContext) +} + +func (s *ExprContext) Over_clause() IOver_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOver_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IOver_clauseContext) +} + +func (s *ExprContext) DISTINCT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDISTINCT_, 0) +} + +func (s *ExprContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *ExprContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *ExprContext) CAST_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCAST_, 0) +} + +func (s *ExprContext) AS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, 0) +} + +func (s *ExprContext) Type_name() IType_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IType_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IType_nameContext) +} + +func (s *ExprContext) Select_stmt() ISelect_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISelect_stmtContext) +} + +func (s *ExprContext) EXISTS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserEXISTS_, 0) +} + +func (s *ExprContext) NOT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_, 0) +} + +func (s *ExprContext) CASE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCASE_, 0) +} + +func (s *ExprContext) END_() antlr.TerminalNode { + return s.GetToken(SQLiteParserEND_, 0) +} + +func (s *ExprContext) AllWHEN_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserWHEN_) +} + +func (s *ExprContext) WHEN_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserWHEN_, i) +} + +func (s *ExprContext) AllTHEN_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserTHEN_) +} + +func (s *ExprContext) THEN_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserTHEN_, i) +} + +func (s *ExprContext) ELSE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserELSE_, 0) +} + +func (s *ExprContext) Raise_function() IRaise_functionContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IRaise_functionContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IRaise_functionContext) +} + +func (s *ExprContext) PIPE2() antlr.TerminalNode { + return s.GetToken(SQLiteParserPIPE2, 0) +} + +func (s *ExprContext) DIV() antlr.TerminalNode { + return 
s.GetToken(SQLiteParserDIV, 0) +} + +func (s *ExprContext) MOD() antlr.TerminalNode { + return s.GetToken(SQLiteParserMOD, 0) +} + +func (s *ExprContext) PLUS() antlr.TerminalNode { + return s.GetToken(SQLiteParserPLUS, 0) +} + +func (s *ExprContext) MINUS() antlr.TerminalNode { + return s.GetToken(SQLiteParserMINUS, 0) +} + +func (s *ExprContext) LT2() antlr.TerminalNode { + return s.GetToken(SQLiteParserLT2, 0) +} + +func (s *ExprContext) GT2() antlr.TerminalNode { + return s.GetToken(SQLiteParserGT2, 0) +} + +func (s *ExprContext) AMP() antlr.TerminalNode { + return s.GetToken(SQLiteParserAMP, 0) +} + +func (s *ExprContext) PIPE() antlr.TerminalNode { + return s.GetToken(SQLiteParserPIPE, 0) +} + +func (s *ExprContext) LT() antlr.TerminalNode { + return s.GetToken(SQLiteParserLT, 0) +} + +func (s *ExprContext) LT_EQ() antlr.TerminalNode { + return s.GetToken(SQLiteParserLT_EQ, 0) +} + +func (s *ExprContext) GT() antlr.TerminalNode { + return s.GetToken(SQLiteParserGT, 0) +} + +func (s *ExprContext) GT_EQ() antlr.TerminalNode { + return s.GetToken(SQLiteParserGT_EQ, 0) +} + +func (s *ExprContext) ASSIGN() antlr.TerminalNode { + return s.GetToken(SQLiteParserASSIGN, 0) +} + +func (s *ExprContext) EQ() antlr.TerminalNode { + return s.GetToken(SQLiteParserEQ, 0) +} + +func (s *ExprContext) NOT_EQ1() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_EQ1, 0) +} + +func (s *ExprContext) NOT_EQ2() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_EQ2, 0) +} + +func (s *ExprContext) IS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIS_, 0) +} + +func (s *ExprContext) IN_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIN_, 0) +} + +func (s *ExprContext) LIKE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserLIKE_, 0) +} + +func (s *ExprContext) GLOB_() antlr.TerminalNode { + return s.GetToken(SQLiteParserGLOB_, 0) +} + +func (s *ExprContext) MATCH_() antlr.TerminalNode { + return s.GetToken(SQLiteParserMATCH_, 0) +} + +func (s *ExprContext) REGEXP_() antlr.TerminalNode { + return s.GetToken(SQLiteParserREGEXP_, 0) +} + +func (s *ExprContext) AND_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAND_, 0) +} + +func (s *ExprContext) OR_() antlr.TerminalNode { + return s.GetToken(SQLiteParserOR_, 0) +} + +func (s *ExprContext) BETWEEN_() antlr.TerminalNode { + return s.GetToken(SQLiteParserBETWEEN_, 0) +} + +func (s *ExprContext) COLLATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCOLLATE_, 0) +} + +func (s *ExprContext) Collation_name() ICollation_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICollation_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ICollation_nameContext) +} + +func (s *ExprContext) ESCAPE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserESCAPE_, 0) +} + +func (s *ExprContext) ISNULL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserISNULL_, 0) +} + +func (s *ExprContext) NOTNULL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOTNULL_, 0) +} + +func (s *ExprContext) NULL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNULL_, 0) +} + +func (s *ExprContext) Table_function_name() ITable_function_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_function_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_function_nameContext) +} + +func (s *ExprContext) 
GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterExpr(s) + } +} + +func (s *ExprContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitExpr(s) + } +} + +func (p *SQLiteParser) Expr() (localctx IExprContext) { + return p.expr(0) +} + +func (p *SQLiteParser) expr(_p int) (localctx IExprContext) { + this := p + _ = this + + var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + _parentState := p.GetState() + localctx = NewExprContext(p, p.GetParserRuleContext(), _parentState) + var _prevctx IExprContext = localctx + var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning. + _startState := 64 + p.EnterRecursionRule(localctx, 64, SQLiteParserRULE_expr, _p) + var _la int + + defer func() { + p.UnrollRecursionContexts(_parentctx) + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + var _alt int + + p.EnterOuterAlt(localctx, 1) + p.SetState(964) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 121, p.GetParserRuleContext()) { + case 1: + { + p.SetState(877) + p.Literal_value() + } + + case 2: + { + p.SetState(878) + p.Match(SQLiteParserBIND_PARAMETER) + } + + case 3: + p.SetState(887) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 109, p.GetParserRuleContext()) == 1 { + p.SetState(882) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 108, p.GetParserRuleContext()) == 1 { + { + p.SetState(879) + p.Schema_name() + } + { + p.SetState(880) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(884) + p.Table_name() + } + { + p.SetState(885) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(889) + p.Column_name() + } + + case 4: + { + p.SetState(890) + p.Unary_operator() + } + { + p.SetState(891) + p.expr(21) + } + + case 5: + { + p.SetState(893) + p.Function_name() + } + { + p.SetState(894) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(907) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserOPEN_PAR, SQLiteParserPLUS, SQLiteParserMINUS, SQLiteParserTILDE, SQLiteParserABORT_, SQLiteParserACTION_, SQLiteParserADD_, SQLiteParserAFTER_, SQLiteParserALL_, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserAND_, SQLiteParserAS_, SQLiteParserASC_, SQLiteParserATTACH_, SQLiteParserAUTOINCREMENT_, SQLiteParserBEFORE_, SQLiteParserBEGIN_, SQLiteParserBETWEEN_, SQLiteParserBY_, SQLiteParserCASCADE_, SQLiteParserCASE_, SQLiteParserCAST_, SQLiteParserCHECK_, SQLiteParserCOLLATE_, SQLiteParserCOLUMN_, SQLiteParserCOMMIT_, SQLiteParserCONFLICT_, SQLiteParserCONSTRAINT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserCURRENT_DATE_, SQLiteParserCURRENT_TIME_, SQLiteParserCURRENT_TIMESTAMP_, SQLiteParserDATABASE_, SQLiteParserDEFAULT_, SQLiteParserDEFERRABLE_, SQLiteParserDEFERRED_, SQLiteParserDELETE_, SQLiteParserDESC_, SQLiteParserDETACH_, SQLiteParserDISTINCT_, SQLiteParserDROP_, SQLiteParserEACH_, SQLiteParserELSE_, 
SQLiteParserEND_, SQLiteParserESCAPE_, SQLiteParserEXCEPT_, SQLiteParserEXCLUSIVE_, SQLiteParserEXISTS_, SQLiteParserEXPLAIN_, SQLiteParserFAIL_, SQLiteParserFOR_, SQLiteParserFOREIGN_, SQLiteParserFROM_, SQLiteParserFULL_, SQLiteParserGLOB_, SQLiteParserGROUP_, SQLiteParserHAVING_, SQLiteParserIF_, SQLiteParserIGNORE_, SQLiteParserIMMEDIATE_, SQLiteParserIN_, SQLiteParserINDEX_, SQLiteParserINDEXED_, SQLiteParserINITIALLY_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINSTEAD_, SQLiteParserINTERSECT_, SQLiteParserINTO_, SQLiteParserIS_, SQLiteParserISNULL_, SQLiteParserJOIN_, SQLiteParserKEY_, SQLiteParserLEFT_, SQLiteParserLIKE_, SQLiteParserLIMIT_, SQLiteParserMATCH_, SQLiteParserNATURAL_, SQLiteParserNO_, SQLiteParserNOT_, SQLiteParserNOTNULL_, SQLiteParserNULL_, SQLiteParserOF_, SQLiteParserOFFSET_, SQLiteParserON_, SQLiteParserOR_, SQLiteParserORDER_, SQLiteParserOUTER_, SQLiteParserPLAN_, SQLiteParserPRAGMA_, SQLiteParserPRIMARY_, SQLiteParserQUERY_, SQLiteParserRAISE_, SQLiteParserRECURSIVE_, SQLiteParserREFERENCES_, SQLiteParserREGEXP_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserRENAME_, SQLiteParserREPLACE_, SQLiteParserRESTRICT_, SQLiteParserRIGHT_, SQLiteParserROLLBACK_, SQLiteParserROW_, SQLiteParserROWS_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserTABLE_, SQLiteParserTEMP_, SQLiteParserTEMPORARY_, SQLiteParserTHEN_, SQLiteParserTO_, SQLiteParserTRANSACTION_, SQLiteParserTRIGGER_, SQLiteParserUNION_, SQLiteParserUNIQUE_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserVIEW_, SQLiteParserVIRTUAL_, SQLiteParserWHEN_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWITHOUT_, SQLiteParserFIRST_VALUE_, SQLiteParserOVER_, SQLiteParserPARTITION_, SQLiteParserRANGE_, SQLiteParserPRECEDING_, SQLiteParserUNBOUNDED_, SQLiteParserCURRENT_, SQLiteParserFOLLOWING_, SQLiteParserCUME_DIST_, SQLiteParserDENSE_RANK_, SQLiteParserLAG_, SQLiteParserLAST_VALUE_, SQLiteParserLEAD_, SQLiteParserNTH_VALUE_, SQLiteParserNTILE_, SQLiteParserPERCENT_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_, SQLiteParserGENERATED_, SQLiteParserALWAYS_, SQLiteParserSTORED_, SQLiteParserTRUE_, SQLiteParserFALSE_, SQLiteParserWINDOW_, SQLiteParserNULLS_, SQLiteParserFIRST_, SQLiteParserLAST_, SQLiteParserFILTER_, SQLiteParserGROUPS_, SQLiteParserEXCLUDE_, SQLiteParserIDENTIFIER, SQLiteParserNUMERIC_LITERAL, SQLiteParserBIND_PARAMETER, SQLiteParserSTRING_LITERAL, SQLiteParserBLOB_LITERAL: + p.SetState(896) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 110, p.GetParserRuleContext()) == 1 { + { + p.SetState(895) + p.Match(SQLiteParserDISTINCT_) + } + + } + { + p.SetState(898) + p.expr(0) + } + p.SetState(903) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(899) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(900) + p.expr(0) + } + + p.SetState(905) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + case SQLiteParserSTAR: + { + p.SetState(906) + p.Match(SQLiteParserSTAR) + } + + case SQLiteParserCLOSE_PAR: + + default: + } + { + p.SetState(909) + p.Match(SQLiteParserCLOSE_PAR) + } + p.SetState(911) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 113, p.GetParserRuleContext()) == 1 { + { + p.SetState(910) + p.Filter_clause() + } + + } + p.SetState(914) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 
114, p.GetParserRuleContext()) == 1 { + { + p.SetState(913) + p.Over_clause() + } + + } + + case 6: + { + p.SetState(916) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(917) + p.expr(0) + } + p.SetState(922) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(918) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(919) + p.expr(0) + } + + p.SetState(924) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(925) + p.Match(SQLiteParserCLOSE_PAR) + } + + case 7: + { + p.SetState(927) + p.Match(SQLiteParserCAST_) + } + { + p.SetState(928) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(929) + p.expr(0) + } + { + p.SetState(930) + p.Match(SQLiteParserAS_) + } + { + p.SetState(931) + p.Type_name() + } + { + p.SetState(932) + p.Match(SQLiteParserCLOSE_PAR) + } + + case 8: + p.SetState(938) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserEXISTS_ || _la == SQLiteParserNOT_ { + p.SetState(935) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserNOT_ { + { + p.SetState(934) + p.Match(SQLiteParserNOT_) + } + + } + { + p.SetState(937) + p.Match(SQLiteParserEXISTS_) + } + + } + { + p.SetState(940) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(941) + p.Select_stmt() + } + { + p.SetState(942) + p.Match(SQLiteParserCLOSE_PAR) + } + + case 9: + { + p.SetState(944) + p.Match(SQLiteParserCASE_) + } + p.SetState(946) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 118, p.GetParserRuleContext()) == 1 { + { + p.SetState(945) + p.expr(0) + } + + } + p.SetState(953) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for ok := true; ok; ok = _la == SQLiteParserWHEN_ { + { + p.SetState(948) + p.Match(SQLiteParserWHEN_) + } + { + p.SetState(949) + p.expr(0) + } + { + p.SetState(950) + p.Match(SQLiteParserTHEN_) + } + { + p.SetState(951) + p.expr(0) + } + + p.SetState(955) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + p.SetState(959) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserELSE_ { + { + p.SetState(957) + p.Match(SQLiteParserELSE_) + } + { + p.SetState(958) + p.expr(0) + } + + } + { + p.SetState(961) + p.Match(SQLiteParserEND_) + } + + case 10: + { + p.SetState(963) + p.Raise_function() + } + + } + p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) + p.SetState(1085) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 137, p.GetParserRuleContext()) + + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + if p.GetParseListeners() != nil { + p.TriggerExitRuleEvent() + } + _prevctx = localctx + p.SetState(1083) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 136, p.GetParserRuleContext()) { + case 1: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(966) + + if !(p.Precpred(p.GetParserRuleContext(), 20)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 20)", "")) + } + { + p.SetState(967) + p.Match(SQLiteParserPIPE2) + } + { + p.SetState(968) + p.expr(21) + } + + case 2: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(969) + + if !(p.Precpred(p.GetParserRuleContext(), 
19)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 19)", "")) + } + { + p.SetState(970) + _la = p.GetTokenStream().LA(1) + + if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&12416) != 0) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(971) + p.expr(20) + } + + case 3: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(972) + + if !(p.Precpred(p.GetParserRuleContext(), 18)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 18)", "")) + } + { + p.SetState(973) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserPLUS || _la == SQLiteParserMINUS) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(974) + p.expr(19) + } + + case 4: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(975) + + if !(p.Precpred(p.GetParserRuleContext(), 17)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 17)", "")) + } + { + p.SetState(976) + _la = p.GetTokenStream().LA(1) + + if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&245760) != 0) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(977) + p.expr(18) + } + + case 5: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(978) + + if !(p.Precpred(p.GetParserRuleContext(), 16)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 16)", "")) + } + { + p.SetState(979) + _la = p.GetTokenStream().LA(1) + + if !((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&3932160) != 0) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(980) + p.expr(17) + } + + case 6: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(981) + + if !(p.Precpred(p.GetParserRuleContext(), 15)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 15)", "")) + } + p.SetState(994) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 122, p.GetParserRuleContext()) { + case 1: + { + p.SetState(982) + p.Match(SQLiteParserASSIGN) + } + + case 2: + { + p.SetState(983) + p.Match(SQLiteParserEQ) + } + + case 3: + { + p.SetState(984) + p.Match(SQLiteParserNOT_EQ1) + } + + case 4: + { + p.SetState(985) + p.Match(SQLiteParserNOT_EQ2) + } + + case 5: + { + p.SetState(986) + p.Match(SQLiteParserIS_) + } + + case 6: + { + p.SetState(987) + p.Match(SQLiteParserIS_) + } + { + p.SetState(988) + p.Match(SQLiteParserNOT_) + } + + case 7: + { + p.SetState(989) + p.Match(SQLiteParserIN_) + } + + case 8: + { + p.SetState(990) + p.Match(SQLiteParserLIKE_) + } + + case 9: + { + p.SetState(991) + p.Match(SQLiteParserGLOB_) + } + + case 10: + { + p.SetState(992) + p.Match(SQLiteParserMATCH_) + } + + case 11: + { + p.SetState(993) + p.Match(SQLiteParserREGEXP_) + } + + } + { + p.SetState(996) + p.expr(16) + } + + case 7: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, 
SQLiteParserRULE_expr) + p.SetState(997) + + if !(p.Precpred(p.GetParserRuleContext(), 14)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 14)", "")) + } + { + p.SetState(998) + p.Match(SQLiteParserAND_) + } + { + p.SetState(999) + p.expr(15) + } + + case 8: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(1000) + + if !(p.Precpred(p.GetParserRuleContext(), 13)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 13)", "")) + } + { + p.SetState(1001) + p.Match(SQLiteParserOR_) + } + { + p.SetState(1002) + p.expr(14) + } + + case 9: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(1003) + + if !(p.Precpred(p.GetParserRuleContext(), 6)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 6)", "")) + } + { + p.SetState(1004) + p.Match(SQLiteParserIS_) + } + p.SetState(1006) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 123, p.GetParserRuleContext()) == 1 { + { + p.SetState(1005) + p.Match(SQLiteParserNOT_) + } + + } + { + p.SetState(1008) + p.expr(7) + } + + case 10: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(1009) + + if !(p.Precpred(p.GetParserRuleContext(), 5)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 5)", "")) + } + p.SetState(1011) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserNOT_ { + { + p.SetState(1010) + p.Match(SQLiteParserNOT_) + } + + } + { + p.SetState(1013) + p.Match(SQLiteParserBETWEEN_) + } + { + p.SetState(1014) + p.expr(0) + } + { + p.SetState(1015) + p.Match(SQLiteParserAND_) + } + { + p.SetState(1016) + p.expr(6) + } + + case 11: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(1018) + + if !(p.Precpred(p.GetParserRuleContext(), 9)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 9)", "")) + } + { + p.SetState(1019) + p.Match(SQLiteParserCOLLATE_) + } + { + p.SetState(1020) + p.Collation_name() + } + + case 12: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(1021) + + if !(p.Precpred(p.GetParserRuleContext(), 8)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 8)", "")) + } + p.SetState(1023) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserNOT_ { + { + p.SetState(1022) + p.Match(SQLiteParserNOT_) + } + + } + { + p.SetState(1025) + _la = p.GetTokenStream().LA(1) + + if !((int64((_la-77)) & ^0x3f) == 0 && ((int64(1)<<(_la-77))&2199028498433) != 0) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(1026) + p.expr(0) + } + p.SetState(1029) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 126, p.GetParserRuleContext()) == 1 { + { + p.SetState(1027) + p.Match(SQLiteParserESCAPE_) + } + { + p.SetState(1028) + p.expr(0) + } + + } + + case 13: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, 
_startState, SQLiteParserRULE_expr) + p.SetState(1031) + + if !(p.Precpred(p.GetParserRuleContext(), 7)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 7)", "")) + } + p.SetState(1036) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserISNULL_: + { + p.SetState(1032) + p.Match(SQLiteParserISNULL_) + } + + case SQLiteParserNOTNULL_: + { + p.SetState(1033) + p.Match(SQLiteParserNOTNULL_) + } + + case SQLiteParserNOT_: + { + p.SetState(1034) + p.Match(SQLiteParserNOT_) + } + { + p.SetState(1035) + p.Match(SQLiteParserNULL_) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + case 14: + localctx = NewExprContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, SQLiteParserRULE_expr) + p.SetState(1038) + + if !(p.Precpred(p.GetParserRuleContext(), 4)) { + panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 4)", "")) + } + p.SetState(1040) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserNOT_ { + { + p.SetState(1039) + p.Match(SQLiteParserNOT_) + } + + } + { + p.SetState(1042) + p.Match(SQLiteParserIN_) + } + p.SetState(1081) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 135, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1043) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1053) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 130, p.GetParserRuleContext()) == 1 { + { + p.SetState(1044) + p.Select_stmt() + } + + } else if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 130, p.GetParserRuleContext()) == 2 { + { + p.SetState(1045) + p.expr(0) + } + p.SetState(1050) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1046) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1047) + p.expr(0) + } + + p.SetState(1052) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + } + { + p.SetState(1055) + p.Match(SQLiteParserCLOSE_PAR) + } + + case 2: + p.SetState(1059) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 131, p.GetParserRuleContext()) == 1 { + { + p.SetState(1056) + p.Schema_name() + } + { + p.SetState(1057) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(1061) + p.Table_name() + } + + case 3: + p.SetState(1065) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 132, p.GetParserRuleContext()) == 1 { + { + p.SetState(1062) + p.Schema_name() + } + { + p.SetState(1063) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(1067) + p.Table_function_name() + } + { + p.SetState(1068) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1077) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&-33552632) != 0) || ((int64((_la-64)) & ^0x3f) == 0 && ((int64(1)<<(_la-64))&-1152921504606846977) != 0) || ((int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&4476578029606273023) != 0) { + { + p.SetState(1069) + p.expr(0) + } + p.SetState(1074) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1070) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1071) + p.expr(0) + } + + p.SetState(1076) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + } + { + p.SetState(1079) + 
p.Match(SQLiteParserCLOSE_PAR) + } + + } + + } + + } + p.SetState(1087) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 137, p.GetParserRuleContext()) + } + + return localctx +} + +// IRaise_functionContext is an interface to support dynamic dispatch. +type IRaise_functionContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + RAISE_() antlr.TerminalNode + OPEN_PAR() antlr.TerminalNode + CLOSE_PAR() antlr.TerminalNode + IGNORE_() antlr.TerminalNode + COMMA() antlr.TerminalNode + Error_message() IError_messageContext + ROLLBACK_() antlr.TerminalNode + ABORT_() antlr.TerminalNode + FAIL_() antlr.TerminalNode + + // IsRaise_functionContext differentiates from other interfaces. + IsRaise_functionContext() +} + +type Raise_functionContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyRaise_functionContext() *Raise_functionContext { + var p = new(Raise_functionContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_raise_function + return p +} + +func (*Raise_functionContext) IsRaise_functionContext() {} + +func NewRaise_functionContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Raise_functionContext { + var p = new(Raise_functionContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_raise_function + + return p +} + +func (s *Raise_functionContext) GetParser() antlr.Parser { return s.parser } + +func (s *Raise_functionContext) RAISE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserRAISE_, 0) +} + +func (s *Raise_functionContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Raise_functionContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Raise_functionContext) IGNORE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIGNORE_, 0) +} + +func (s *Raise_functionContext) COMMA() antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, 0) +} + +func (s *Raise_functionContext) Error_message() IError_messageContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IError_messageContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IError_messageContext) +} + +func (s *Raise_functionContext) ROLLBACK_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROLLBACK_, 0) +} + +func (s *Raise_functionContext) ABORT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserABORT_, 0) +} + +func (s *Raise_functionContext) FAIL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFAIL_, 0) +} + +func (s *Raise_functionContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Raise_functionContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Raise_functionContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterRaise_function(s) + } +} + +func (s *Raise_functionContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitRaise_function(s) + } +} + +func (p *SQLiteParser) Raise_function() (localctx IRaise_functionContext) { + this := 
p + _ = this + + localctx = NewRaise_functionContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 66, SQLiteParserRULE_raise_function) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1088) + p.Match(SQLiteParserRAISE_) + } + { + p.SetState(1089) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1094) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserIGNORE_: + { + p.SetState(1090) + p.Match(SQLiteParserIGNORE_) + } + + case SQLiteParserABORT_, SQLiteParserFAIL_, SQLiteParserROLLBACK_: + { + p.SetState(1091) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserABORT_ || _la == SQLiteParserFAIL_ || _la == SQLiteParserROLLBACK_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(1092) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1093) + p.Error_message() + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + { + p.SetState(1096) + p.Match(SQLiteParserCLOSE_PAR) + } + + return localctx +} + +// ILiteral_valueContext is an interface to support dynamic dispatch. +type ILiteral_valueContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + NUMERIC_LITERAL() antlr.TerminalNode + STRING_LITERAL() antlr.TerminalNode + BLOB_LITERAL() antlr.TerminalNode + NULL_() antlr.TerminalNode + TRUE_() antlr.TerminalNode + FALSE_() antlr.TerminalNode + CURRENT_TIME_() antlr.TerminalNode + CURRENT_DATE_() antlr.TerminalNode + CURRENT_TIMESTAMP_() antlr.TerminalNode + + // IsLiteral_valueContext differentiates from other interfaces. 
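+ // (Editorial note, not ANTLR-generated: per the getters below, a
+ // literal_value appears to be a single terminal — a numeric, string, or
+ // blob literal, NULL, TRUE, FALSE, or one of CURRENT_TIME / CURRENT_DATE /
+ // CURRENT_TIMESTAMP.)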
+ IsLiteral_valueContext() +} + +type Literal_valueContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyLiteral_valueContext() *Literal_valueContext { + var p = new(Literal_valueContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_literal_value + return p +} + +func (*Literal_valueContext) IsLiteral_valueContext() {} + +func NewLiteral_valueContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Literal_valueContext { + var p = new(Literal_valueContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_literal_value + + return p +} + +func (s *Literal_valueContext) GetParser() antlr.Parser { return s.parser } + +func (s *Literal_valueContext) NUMERIC_LITERAL() antlr.TerminalNode { + return s.GetToken(SQLiteParserNUMERIC_LITERAL, 0) +} + +func (s *Literal_valueContext) STRING_LITERAL() antlr.TerminalNode { + return s.GetToken(SQLiteParserSTRING_LITERAL, 0) +} + +func (s *Literal_valueContext) BLOB_LITERAL() antlr.TerminalNode { + return s.GetToken(SQLiteParserBLOB_LITERAL, 0) +} + +func (s *Literal_valueContext) NULL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNULL_, 0) +} + +func (s *Literal_valueContext) TRUE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTRUE_, 0) +} + +func (s *Literal_valueContext) FALSE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFALSE_, 0) +} + +func (s *Literal_valueContext) CURRENT_TIME_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCURRENT_TIME_, 0) +} + +func (s *Literal_valueContext) CURRENT_DATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCURRENT_DATE_, 0) +} + +func (s *Literal_valueContext) CURRENT_TIMESTAMP_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCURRENT_TIMESTAMP_, 0) +} + +func (s *Literal_valueContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Literal_valueContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Literal_valueContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterLiteral_value(s) + } +} + +func (s *Literal_valueContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitLiteral_value(s) + } +} + +func (p *SQLiteParser) Literal_value() (localctx ILiteral_valueContext) { + this := p + _ = this + + localctx = NewLiteral_valueContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 68, SQLiteParserRULE_literal_value) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1098) + _la = p.GetTokenStream().LA(1) + + if !(((int64((_la-52)) & ^0x3f) == 0 && ((int64(1)<<(_la-52))&4503599627370503) != 0) || ((int64((_la-172)) & ^0x3f) == 0 && ((int64(1)<<(_la-172))&212995) != 0)) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + return localctx +} + +// IValue_rowContext is an interface to support dynamic dispatch. 
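+// NOTE: hand-added annotation, not ANTLR output. value_row is the
+// parenthesised row form "OPEN_PAR expr (COMMA expr)* CLOSE_PAR",
+// e.g. the (1, 'a') rows inside a VALUES list.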
+type IValue_rowContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + OPEN_PAR() antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + CLOSE_PAR() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsValue_rowContext differentiates from other interfaces. + IsValue_rowContext() +} + +type Value_rowContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyValue_rowContext() *Value_rowContext { + var p = new(Value_rowContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_value_row + return p +} + +func (*Value_rowContext) IsValue_rowContext() {} + +func NewValue_rowContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Value_rowContext { + var p = new(Value_rowContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_value_row + + return p +} + +func (s *Value_rowContext) GetParser() antlr.Parser { return s.parser } + +func (s *Value_rowContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Value_rowContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Value_rowContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Value_rowContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Value_rowContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Value_rowContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Value_rowContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Value_rowContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Value_rowContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterValue_row(s) + } +} + +func (s *Value_rowContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitValue_row(s) + } +} + +func (p *SQLiteParser) Value_row() (localctx IValue_rowContext) { + this := p + _ = this + + localctx = NewValue_rowContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 70, SQLiteParserRULE_value_row) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1100) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1101) + p.expr(0) + } + p.SetState(1106) + 
p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1102) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1103) + p.expr(0) + } + + p.SetState(1108) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(1109) + p.Match(SQLiteParserCLOSE_PAR) + } + + return localctx +} + +// IValues_clauseContext is an interface to support dynamic dispatch. +type IValues_clauseContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + VALUES_() antlr.TerminalNode + AllValue_row() []IValue_rowContext + Value_row(i int) IValue_rowContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsValues_clauseContext differentiates from other interfaces. + IsValues_clauseContext() +} + +type Values_clauseContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyValues_clauseContext() *Values_clauseContext { + var p = new(Values_clauseContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_values_clause + return p +} + +func (*Values_clauseContext) IsValues_clauseContext() {} + +func NewValues_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Values_clauseContext { + var p = new(Values_clauseContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_values_clause + + return p +} + +func (s *Values_clauseContext) GetParser() antlr.Parser { return s.parser } + +func (s *Values_clauseContext) VALUES_() antlr.TerminalNode { + return s.GetToken(SQLiteParserVALUES_, 0) +} + +func (s *Values_clauseContext) AllValue_row() []IValue_rowContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IValue_rowContext); ok { + len++ + } + } + + tst := make([]IValue_rowContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IValue_rowContext); ok { + tst[i] = t.(IValue_rowContext) + i++ + } + } + + return tst +} + +func (s *Values_clauseContext) Value_row(i int) IValue_rowContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IValue_rowContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IValue_rowContext) +} + +func (s *Values_clauseContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Values_clauseContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Values_clauseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Values_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Values_clauseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterValues_clause(s) + } +} + +func (s *Values_clauseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitValues_clause(s) + } +} + +func (p *SQLiteParser) Values_clause() (localctx IValues_clauseContext) { + this := p + _ = this + + localctx = NewValues_clauseContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 72, 
SQLiteParserRULE_values_clause) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1111) + p.Match(SQLiteParserVALUES_) + } + { + p.SetState(1112) + p.Value_row() + } + p.SetState(1117) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1113) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1114) + p.Value_row() + } + + p.SetState(1119) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + return localctx +} + +// IInsert_stmtContext is an interface to support dynamic dispatch. +type IInsert_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + INTO_() antlr.TerminalNode + Table_name() ITable_nameContext + INSERT_() antlr.TerminalNode + REPLACE_() antlr.TerminalNode + OR_() antlr.TerminalNode + DEFAULT_() antlr.TerminalNode + VALUES_() antlr.TerminalNode + With_clause() IWith_clauseContext + ROLLBACK_() antlr.TerminalNode + ABORT_() antlr.TerminalNode + FAIL_() antlr.TerminalNode + IGNORE_() antlr.TerminalNode + Schema_name() ISchema_nameContext + DOT() antlr.TerminalNode + AS_() antlr.TerminalNode + Table_alias() ITable_aliasContext + OPEN_PAR() antlr.TerminalNode + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + CLOSE_PAR() antlr.TerminalNode + Returning_clause() IReturning_clauseContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + Values_clause() IValues_clauseContext + Select_stmt() ISelect_stmtContext + Upsert_clause() IUpsert_clauseContext + + // IsInsert_stmtContext differentiates from other interfaces. 
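+// NOTE: hand-added annotation, not ANTLR output. Values_clause above parses
+// "VALUES value_row (COMMA value_row)*". insert_stmt (this context) covers
+// [with_clause] (INSERT | REPLACE | INSERT OR ROLLBACK/ABORT/REPLACE/FAIL/IGNORE)
+// INTO [schema_name.]table_name [AS table_alias] [(column_name, ...)],
+// followed by a values_clause or select_stmt (optionally with an
+// upsert_clause) or by DEFAULT VALUES, plus an optional returning_clause.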
+ IsInsert_stmtContext() +} + +type Insert_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyInsert_stmtContext() *Insert_stmtContext { + var p = new(Insert_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_insert_stmt + return p +} + +func (*Insert_stmtContext) IsInsert_stmtContext() {} + +func NewInsert_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Insert_stmtContext { + var p = new(Insert_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_insert_stmt + + return p +} + +func (s *Insert_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Insert_stmtContext) INTO_() antlr.TerminalNode { + return s.GetToken(SQLiteParserINTO_, 0) +} + +func (s *Insert_stmtContext) Table_name() ITable_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_nameContext) +} + +func (s *Insert_stmtContext) INSERT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserINSERT_, 0) +} + +func (s *Insert_stmtContext) REPLACE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserREPLACE_, 0) +} + +func (s *Insert_stmtContext) OR_() antlr.TerminalNode { + return s.GetToken(SQLiteParserOR_, 0) +} + +func (s *Insert_stmtContext) DEFAULT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDEFAULT_, 0) +} + +func (s *Insert_stmtContext) VALUES_() antlr.TerminalNode { + return s.GetToken(SQLiteParserVALUES_, 0) +} + +func (s *Insert_stmtContext) With_clause() IWith_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IWith_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IWith_clauseContext) +} + +func (s *Insert_stmtContext) ROLLBACK_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROLLBACK_, 0) +} + +func (s *Insert_stmtContext) ABORT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserABORT_, 0) +} + +func (s *Insert_stmtContext) FAIL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFAIL_, 0) +} + +func (s *Insert_stmtContext) IGNORE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIGNORE_, 0) +} + +func (s *Insert_stmtContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *Insert_stmtContext) DOT() antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, 0) +} + +func (s *Insert_stmtContext) AS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, 0) +} + +func (s *Insert_stmtContext) Table_alias() ITable_aliasContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_aliasContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_aliasContext) +} + +func (s *Insert_stmtContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Insert_stmtContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok 
:= ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Insert_stmtContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Insert_stmtContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Insert_stmtContext) Returning_clause() IReturning_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IReturning_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IReturning_clauseContext) +} + +func (s *Insert_stmtContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Insert_stmtContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Insert_stmtContext) Values_clause() IValues_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IValues_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IValues_clauseContext) +} + +func (s *Insert_stmtContext) Select_stmt() ISelect_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISelect_stmtContext) +} + +func (s *Insert_stmtContext) Upsert_clause() IUpsert_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IUpsert_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IUpsert_clauseContext) +} + +func (s *Insert_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Insert_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Insert_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterInsert_stmt(s) + } +} + +func (s *Insert_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitInsert_stmt(s) + } +} + +func (p *SQLiteParser) Insert_stmt() (localctx IInsert_stmtContext) { + this := p + _ = this + + localctx = NewInsert_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 74, SQLiteParserRULE_insert_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(1121) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWITH_ { + { + p.SetState(1120) + p.With_clause() + } + + } + p.SetState(1128) + p.GetErrorHandler().Sync(p) + switch 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 142, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1123) + p.Match(SQLiteParserINSERT_) + } + + case 2: + { + p.SetState(1124) + p.Match(SQLiteParserREPLACE_) + } + + case 3: + { + p.SetState(1125) + p.Match(SQLiteParserINSERT_) + } + { + p.SetState(1126) + p.Match(SQLiteParserOR_) + } + { + p.SetState(1127) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserABORT_ || ((int64((_la-72)) & ^0x3f) == 0 && ((int64(1)<<(_la-72))&19140298416325121) != 0)) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + } + { + p.SetState(1130) + p.Match(SQLiteParserINTO_) + } + p.SetState(1134) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 143, p.GetParserRuleContext()) == 1 { + { + p.SetState(1131) + p.Schema_name() + } + { + p.SetState(1132) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(1136) + p.Table_name() + } + p.SetState(1139) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserAS_ { + { + p.SetState(1137) + p.Match(SQLiteParserAS_) + } + { + p.SetState(1138) + p.Table_alias() + } + + } + p.SetState(1152) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserOPEN_PAR { + { + p.SetState(1141) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1142) + p.Column_name() + } + p.SetState(1147) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1143) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1144) + p.Column_name() + } + + p.SetState(1149) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(1150) + p.Match(SQLiteParserCLOSE_PAR) + } + + } + p.SetState(1163) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserSELECT_, SQLiteParserVALUES_, SQLiteParserWITH_: + p.SetState(1156) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 147, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1154) + p.Values_clause() + } + + case 2: + { + p.SetState(1155) + p.Select_stmt() + } + + } + p.SetState(1159) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserON_ { + { + p.SetState(1158) + p.Upsert_clause() + } + + } + + case SQLiteParserDEFAULT_: + { + p.SetState(1161) + p.Match(SQLiteParserDEFAULT_) + } + { + p.SetState(1162) + p.Match(SQLiteParserVALUES_) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + p.SetState(1166) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserRETURNING_ { + { + p.SetState(1165) + p.Returning_clause() + } + + } + + return localctx +} + +// IReturning_clauseContext is an interface to support dynamic dispatch. +type IReturning_clauseContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + RETURNING_() antlr.TerminalNode + AllResult_column() []IResult_columnContext + Result_column(i int) IResult_columnContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsReturning_clauseContext differentiates from other interfaces. 
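+// NOTE: hand-added annotation, not ANTLR output. returning_clause parses
+// "RETURNING result_column (COMMA result_column)*", as in
+// INSERT INTO t (a) VALUES (1) RETURNING id.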
+ IsReturning_clauseContext() +} + +type Returning_clauseContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyReturning_clauseContext() *Returning_clauseContext { + var p = new(Returning_clauseContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_returning_clause + return p +} + +func (*Returning_clauseContext) IsReturning_clauseContext() {} + +func NewReturning_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Returning_clauseContext { + var p = new(Returning_clauseContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_returning_clause + + return p +} + +func (s *Returning_clauseContext) GetParser() antlr.Parser { return s.parser } + +func (s *Returning_clauseContext) RETURNING_() antlr.TerminalNode { + return s.GetToken(SQLiteParserRETURNING_, 0) +} + +func (s *Returning_clauseContext) AllResult_column() []IResult_columnContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IResult_columnContext); ok { + len++ + } + } + + tst := make([]IResult_columnContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IResult_columnContext); ok { + tst[i] = t.(IResult_columnContext) + i++ + } + } + + return tst +} + +func (s *Returning_clauseContext) Result_column(i int) IResult_columnContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IResult_columnContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IResult_columnContext) +} + +func (s *Returning_clauseContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Returning_clauseContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Returning_clauseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Returning_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Returning_clauseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterReturning_clause(s) + } +} + +func (s *Returning_clauseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitReturning_clause(s) + } +} + +func (p *SQLiteParser) Returning_clause() (localctx IReturning_clauseContext) { + this := p + _ = this + + localctx = NewReturning_clauseContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 76, SQLiteParserRULE_returning_clause) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1168) + p.Match(SQLiteParserRETURNING_) + } + { + p.SetState(1169) + p.Result_column() + } + p.SetState(1174) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1170) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1171) + p.Result_column() + } + + p.SetState(1176) + 
p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + return localctx +} + +// IUpsert_clauseContext is an interface to support dynamic dispatch. +type IUpsert_clauseContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + ON_() antlr.TerminalNode + CONFLICT_() antlr.TerminalNode + DO_() antlr.TerminalNode + NOTHING_() antlr.TerminalNode + UPDATE_() antlr.TerminalNode + SET_() antlr.TerminalNode + OPEN_PAR() antlr.TerminalNode + AllIndexed_column() []IIndexed_columnContext + Indexed_column(i int) IIndexed_columnContext + CLOSE_PAR() antlr.TerminalNode + AllASSIGN() []antlr.TerminalNode + ASSIGN(i int) antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + AllWHERE_() []antlr.TerminalNode + WHERE_(i int) antlr.TerminalNode + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + AllColumn_name_list() []IColumn_name_listContext + Column_name_list(i int) IColumn_name_listContext + + // IsUpsert_clauseContext differentiates from other interfaces. + IsUpsert_clauseContext() +} + +type Upsert_clauseContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyUpsert_clauseContext() *Upsert_clauseContext { + var p = new(Upsert_clauseContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_upsert_clause + return p +} + +func (*Upsert_clauseContext) IsUpsert_clauseContext() {} + +func NewUpsert_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Upsert_clauseContext { + var p = new(Upsert_clauseContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_upsert_clause + + return p +} + +func (s *Upsert_clauseContext) GetParser() antlr.Parser { return s.parser } + +func (s *Upsert_clauseContext) ON_() antlr.TerminalNode { + return s.GetToken(SQLiteParserON_, 0) +} + +func (s *Upsert_clauseContext) CONFLICT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCONFLICT_, 0) +} + +func (s *Upsert_clauseContext) DO_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDO_, 0) +} + +func (s *Upsert_clauseContext) NOTHING_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOTHING_, 0) +} + +func (s *Upsert_clauseContext) UPDATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUPDATE_, 0) +} + +func (s *Upsert_clauseContext) SET_() antlr.TerminalNode { + return s.GetToken(SQLiteParserSET_, 0) +} + +func (s *Upsert_clauseContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Upsert_clauseContext) AllIndexed_column() []IIndexed_columnContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IIndexed_columnContext); ok { + len++ + } + } + + tst := make([]IIndexed_columnContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IIndexed_columnContext); ok { + tst[i] = t.(IIndexed_columnContext) + i++ + } + } + + return tst +} + +func (s *Upsert_clauseContext) Indexed_column(i int) IIndexed_columnContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IIndexed_columnContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IIndexed_columnContext) +} + +func (s 
*Upsert_clauseContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Upsert_clauseContext) AllASSIGN() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserASSIGN) +} + +func (s *Upsert_clauseContext) ASSIGN(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserASSIGN, i) +} + +func (s *Upsert_clauseContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Upsert_clauseContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Upsert_clauseContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Upsert_clauseContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Upsert_clauseContext) AllWHERE_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserWHERE_) +} + +func (s *Upsert_clauseContext) WHERE_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserWHERE_, i) +} + +func (s *Upsert_clauseContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Upsert_clauseContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Upsert_clauseContext) AllColumn_name_list() []IColumn_name_listContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_name_listContext); ok { + len++ + } + } + + tst := make([]IColumn_name_listContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_name_listContext); ok { + tst[i] = t.(IColumn_name_listContext) + i++ + } + } + + return tst +} + +func (s *Upsert_clauseContext) Column_name_list(i int) IColumn_name_listContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_name_listContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_name_listContext) +} + +func (s *Upsert_clauseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Upsert_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Upsert_clauseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterUpsert_clause(s) + } +} + +func (s *Upsert_clauseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := 
listener.(SQLiteParserListener); ok { + listenerT.ExitUpsert_clause(s) + } +} + +func (p *SQLiteParser) Upsert_clause() (localctx IUpsert_clauseContext) { + this := p + _ = this + + localctx = NewUpsert_clauseContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 78, SQLiteParserRULE_upsert_clause) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1177) + p.Match(SQLiteParserON_) + } + { + p.SetState(1178) + p.Match(SQLiteParserCONFLICT_) + } + p.SetState(1193) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserOPEN_PAR { + { + p.SetState(1179) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1180) + p.Indexed_column() + } + p.SetState(1185) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1181) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1182) + p.Indexed_column() + } + + p.SetState(1187) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(1188) + p.Match(SQLiteParserCLOSE_PAR) + } + p.SetState(1191) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWHERE_ { + { + p.SetState(1189) + p.Match(SQLiteParserWHERE_) + } + { + p.SetState(1190) + p.expr(0) + } + + } + + } + { + p.SetState(1195) + p.Match(SQLiteParserDO_) + } + p.SetState(1222) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserNOTHING_: + { + p.SetState(1196) + p.Match(SQLiteParserNOTHING_) + } + + case SQLiteParserUPDATE_: + { + p.SetState(1197) + p.Match(SQLiteParserUPDATE_) + } + { + p.SetState(1198) + p.Match(SQLiteParserSET_) + } + + p.SetState(1201) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 155, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1199) + p.Column_name() + } + + case 2: + { + p.SetState(1200) + p.Column_name_list() + } + + } + { + p.SetState(1203) + p.Match(SQLiteParserASSIGN) + } + { + p.SetState(1204) + p.expr(0) + } + p.SetState(1215) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1205) + p.Match(SQLiteParserCOMMA) + } + p.SetState(1208) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 156, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1206) + p.Column_name() + } + + case 2: + { + p.SetState(1207) + p.Column_name_list() + } + + } + { + p.SetState(1210) + p.Match(SQLiteParserASSIGN) + } + { + p.SetState(1211) + p.expr(0) + } + + p.SetState(1217) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + p.SetState(1220) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWHERE_ { + { + p.SetState(1218) + p.Match(SQLiteParserWHERE_) + } + { + p.SetState(1219) + p.expr(0) + } + + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + return localctx +} + +// IPragma_stmtContext is an interface to support dynamic dispatch. +type IPragma_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
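+// NOTE: hand-added annotation, not ANTLR output. The Upsert_clause method
+// above parses "ON CONFLICT [(indexed_column, ...) [WHERE expr]] DO
+// (NOTHING | UPDATE SET (column_name | column_name_list) = expr, ... [WHERE expr])",
+// i.e. the tail of an INSERT such as
+// "ON CONFLICT (id) DO UPDATE SET name = excluded.name".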
+ GetParser() antlr.Parser + + // Getter signatures + PRAGMA_() antlr.TerminalNode + Pragma_name() IPragma_nameContext + Schema_name() ISchema_nameContext + DOT() antlr.TerminalNode + ASSIGN() antlr.TerminalNode + Pragma_value() IPragma_valueContext + OPEN_PAR() antlr.TerminalNode + CLOSE_PAR() antlr.TerminalNode + + // IsPragma_stmtContext differentiates from other interfaces. + IsPragma_stmtContext() +} + +type Pragma_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyPragma_stmtContext() *Pragma_stmtContext { + var p = new(Pragma_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_pragma_stmt + return p +} + +func (*Pragma_stmtContext) IsPragma_stmtContext() {} + +func NewPragma_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Pragma_stmtContext { + var p = new(Pragma_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_pragma_stmt + + return p +} + +func (s *Pragma_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Pragma_stmtContext) PRAGMA_() antlr.TerminalNode { + return s.GetToken(SQLiteParserPRAGMA_, 0) +} + +func (s *Pragma_stmtContext) Pragma_name() IPragma_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IPragma_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IPragma_nameContext) +} + +func (s *Pragma_stmtContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *Pragma_stmtContext) DOT() antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, 0) +} + +func (s *Pragma_stmtContext) ASSIGN() antlr.TerminalNode { + return s.GetToken(SQLiteParserASSIGN, 0) +} + +func (s *Pragma_stmtContext) Pragma_value() IPragma_valueContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IPragma_valueContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IPragma_valueContext) +} + +func (s *Pragma_stmtContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Pragma_stmtContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Pragma_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Pragma_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Pragma_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterPragma_stmt(s) + } +} + +func (s *Pragma_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitPragma_stmt(s) + } +} + +func (p *SQLiteParser) Pragma_stmt() (localctx IPragma_stmtContext) { + this := p + _ = this + + localctx = NewPragma_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 80, SQLiteParserRULE_pragma_stmt) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok 
:= err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1224) + p.Match(SQLiteParserPRAGMA_) + } + p.SetState(1228) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 160, p.GetParserRuleContext()) == 1 { + { + p.SetState(1225) + p.Schema_name() + } + { + p.SetState(1226) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(1230) + p.Pragma_name() + } + p.SetState(1237) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserASSIGN: + { + p.SetState(1231) + p.Match(SQLiteParserASSIGN) + } + { + p.SetState(1232) + p.Pragma_value() + } + + case SQLiteParserOPEN_PAR: + { + p.SetState(1233) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1234) + p.Pragma_value() + } + { + p.SetState(1235) + p.Match(SQLiteParserCLOSE_PAR) + } + + case SQLiteParserEOF, SQLiteParserSCOL, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserATTACH_, SQLiteParserBEGIN_, SQLiteParserCOMMIT_, SQLiteParserCREATE_, SQLiteParserDELETE_, SQLiteParserDETACH_, SQLiteParserDROP_, SQLiteParserEND_, SQLiteParserEXPLAIN_, SQLiteParserINSERT_, SQLiteParserPRAGMA_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserREPLACE_, SQLiteParserROLLBACK_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserUPDATE_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserWITH_: + + default: + } + + return localctx +} + +// IPragma_valueContext is an interface to support dynamic dispatch. +type IPragma_valueContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Signed_number() ISigned_numberContext + Name() INameContext + STRING_LITERAL() antlr.TerminalNode + + // IsPragma_valueContext differentiates from other interfaces. 
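+// NOTE: hand-added annotation, not ANTLR output. Pragma_stmt above parses
+// "PRAGMA [schema_name.]pragma_name [= pragma_value | (pragma_value)]";
+// pragma_value (this context) is a signed number, a name, or a string literal.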
+ IsPragma_valueContext() +} + +type Pragma_valueContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyPragma_valueContext() *Pragma_valueContext { + var p = new(Pragma_valueContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_pragma_value + return p +} + +func (*Pragma_valueContext) IsPragma_valueContext() {} + +func NewPragma_valueContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Pragma_valueContext { + var p = new(Pragma_valueContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_pragma_value + + return p +} + +func (s *Pragma_valueContext) GetParser() antlr.Parser { return s.parser } + +func (s *Pragma_valueContext) Signed_number() ISigned_numberContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISigned_numberContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISigned_numberContext) +} + +func (s *Pragma_valueContext) Name() INameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(INameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(INameContext) +} + +func (s *Pragma_valueContext) STRING_LITERAL() antlr.TerminalNode { + return s.GetToken(SQLiteParserSTRING_LITERAL, 0) +} + +func (s *Pragma_valueContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Pragma_valueContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Pragma_valueContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterPragma_value(s) + } +} + +func (s *Pragma_valueContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitPragma_value(s) + } +} + +func (p *SQLiteParser) Pragma_value() (localctx IPragma_valueContext) { + this := p + _ = this + + localctx = NewPragma_valueContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 82, SQLiteParserRULE_pragma_value) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1242) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 162, p.GetParserRuleContext()) { + case 1: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1239) + p.Signed_number() + } + + case 2: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(1240) + p.Name() + } + + case 3: + p.EnterOuterAlt(localctx, 3) + { + p.SetState(1241) + p.Match(SQLiteParserSTRING_LITERAL) + } + + } + + return localctx +} + +// IReindex_stmtContext is an interface to support dynamic dispatch. +type IReindex_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
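+// NOTE: hand-added annotation, not ANTLR output. reindex_stmt (this context)
+// parses "REINDEX [collation_name | [schema_name.](table_name | index_name)]".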
+ GetParser() antlr.Parser + + // Getter signatures + REINDEX_() antlr.TerminalNode + Collation_name() ICollation_nameContext + Table_name() ITable_nameContext + Index_name() IIndex_nameContext + Schema_name() ISchema_nameContext + DOT() antlr.TerminalNode + + // IsReindex_stmtContext differentiates from other interfaces. + IsReindex_stmtContext() +} + +type Reindex_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyReindex_stmtContext() *Reindex_stmtContext { + var p = new(Reindex_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_reindex_stmt + return p +} + +func (*Reindex_stmtContext) IsReindex_stmtContext() {} + +func NewReindex_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Reindex_stmtContext { + var p = new(Reindex_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_reindex_stmt + + return p +} + +func (s *Reindex_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Reindex_stmtContext) REINDEX_() antlr.TerminalNode { + return s.GetToken(SQLiteParserREINDEX_, 0) +} + +func (s *Reindex_stmtContext) Collation_name() ICollation_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICollation_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ICollation_nameContext) +} + +func (s *Reindex_stmtContext) Table_name() ITable_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_nameContext) +} + +func (s *Reindex_stmtContext) Index_name() IIndex_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IIndex_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IIndex_nameContext) +} + +func (s *Reindex_stmtContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *Reindex_stmtContext) DOT() antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, 0) +} + +func (s *Reindex_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Reindex_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Reindex_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterReindex_stmt(s) + } +} + +func (s *Reindex_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitReindex_stmt(s) + } +} + +func (p *SQLiteParser) Reindex_stmt() (localctx IReindex_stmtContext) { + this := p + _ = this + + localctx = NewReindex_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 84, SQLiteParserRULE_reindex_stmt) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + 
localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1244) + p.Match(SQLiteParserREINDEX_) + } + p.SetState(1255) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 165, p.GetParserRuleContext()) == 1 { + { + p.SetState(1245) + p.Collation_name() + } + + } else if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 165, p.GetParserRuleContext()) == 2 { + p.SetState(1249) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 163, p.GetParserRuleContext()) == 1 { + { + p.SetState(1246) + p.Schema_name() + } + { + p.SetState(1247) + p.Match(SQLiteParserDOT) + } + + } + p.SetState(1253) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 164, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1251) + p.Table_name() + } + + case 2: + { + p.SetState(1252) + p.Index_name() + } + + } + + } + + return localctx +} + +// ISelect_stmtContext is an interface to support dynamic dispatch. +type ISelect_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + AllSelect_core() []ISelect_coreContext + Select_core(i int) ISelect_coreContext + Common_table_stmt() ICommon_table_stmtContext + AllCompound_operator() []ICompound_operatorContext + Compound_operator(i int) ICompound_operatorContext + Order_by_stmt() IOrder_by_stmtContext + Limit_stmt() ILimit_stmtContext + + // IsSelect_stmtContext differentiates from other interfaces. + IsSelect_stmtContext() +} + +type Select_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptySelect_stmtContext() *Select_stmtContext { + var p = new(Select_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_select_stmt + return p +} + +func (*Select_stmtContext) IsSelect_stmtContext() {} + +func NewSelect_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Select_stmtContext { + var p = new(Select_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_select_stmt + + return p +} + +func (s *Select_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Select_stmtContext) AllSelect_core() []ISelect_coreContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ISelect_coreContext); ok { + len++ + } + } + + tst := make([]ISelect_coreContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ISelect_coreContext); ok { + tst[i] = t.(ISelect_coreContext) + i++ + } + } + + return tst +} + +func (s *Select_stmtContext) Select_core(i int) ISelect_coreContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_coreContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ISelect_coreContext) +} + +func (s *Select_stmtContext) Common_table_stmt() ICommon_table_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICommon_table_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ICommon_table_stmtContext) +} + +func 
(s *Select_stmtContext) AllCompound_operator() []ICompound_operatorContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ICompound_operatorContext); ok { + len++ + } + } + + tst := make([]ICompound_operatorContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ICompound_operatorContext); ok { + tst[i] = t.(ICompound_operatorContext) + i++ + } + } + + return tst +} + +func (s *Select_stmtContext) Compound_operator(i int) ICompound_operatorContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICompound_operatorContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ICompound_operatorContext) +} + +func (s *Select_stmtContext) Order_by_stmt() IOrder_by_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOrder_by_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IOrder_by_stmtContext) +} + +func (s *Select_stmtContext) Limit_stmt() ILimit_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ILimit_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ILimit_stmtContext) +} + +func (s *Select_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Select_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Select_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterSelect_stmt(s) + } +} + +func (s *Select_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitSelect_stmt(s) + } +} + +func (p *SQLiteParser) Select_stmt() (localctx ISelect_stmtContext) { + this := p + _ = this + + localctx = NewSelect_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 86, SQLiteParserRULE_select_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + var _alt int + + p.EnterOuterAlt(localctx, 1) + p.SetState(1258) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWITH_ { + { + p.SetState(1257) + p.Common_table_stmt() + } + + } + { + p.SetState(1260) + p.Select_core() + } + p.SetState(1266) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 167, p.GetParserRuleContext()) + + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + { + p.SetState(1261) + p.Compound_operator() + } + { + p.SetState(1262) + p.Select_core() + } + + } + p.SetState(1268) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 167, p.GetParserRuleContext()) + } + p.SetState(1270) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserORDER_ { + { + p.SetState(1269) + p.Order_by_stmt() + } + + } + p.SetState(1273) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserLIMIT_ { + { + 
p.SetState(1272) + p.Limit_stmt() + } + + } + + return localctx +} + +// IJoin_clauseContext is an interface to support dynamic dispatch. +type IJoin_clauseContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + AllTable_or_subquery() []ITable_or_subqueryContext + Table_or_subquery(i int) ITable_or_subqueryContext + AllJoin_operator() []IJoin_operatorContext + Join_operator(i int) IJoin_operatorContext + AllJoin_constraint() []IJoin_constraintContext + Join_constraint(i int) IJoin_constraintContext + + // IsJoin_clauseContext differentiates from other interfaces. + IsJoin_clauseContext() +} + +type Join_clauseContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyJoin_clauseContext() *Join_clauseContext { + var p = new(Join_clauseContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_join_clause + return p +} + +func (*Join_clauseContext) IsJoin_clauseContext() {} + +func NewJoin_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Join_clauseContext { + var p = new(Join_clauseContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_join_clause + + return p +} + +func (s *Join_clauseContext) GetParser() antlr.Parser { return s.parser } + +func (s *Join_clauseContext) AllTable_or_subquery() []ITable_or_subqueryContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ITable_or_subqueryContext); ok { + len++ + } + } + + tst := make([]ITable_or_subqueryContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ITable_or_subqueryContext); ok { + tst[i] = t.(ITable_or_subqueryContext) + i++ + } + } + + return tst +} + +func (s *Join_clauseContext) Table_or_subquery(i int) ITable_or_subqueryContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_or_subqueryContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ITable_or_subqueryContext) +} + +func (s *Join_clauseContext) AllJoin_operator() []IJoin_operatorContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IJoin_operatorContext); ok { + len++ + } + } + + tst := make([]IJoin_operatorContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IJoin_operatorContext); ok { + tst[i] = t.(IJoin_operatorContext) + i++ + } + } + + return tst +} + +func (s *Join_clauseContext) Join_operator(i int) IJoin_operatorContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IJoin_operatorContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IJoin_operatorContext) +} + +func (s *Join_clauseContext) AllJoin_constraint() []IJoin_constraintContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IJoin_constraintContext); ok { + len++ + } + } + + tst := make([]IJoin_constraintContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IJoin_constraintContext); ok { + tst[i] = t.(IJoin_constraintContext) + i++ + } + } + + return tst +} + +func (s *Join_clauseContext) Join_constraint(i int) IJoin_constraintContext { + var t 
antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IJoin_constraintContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IJoin_constraintContext) +} + +func (s *Join_clauseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Join_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Join_clauseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterJoin_clause(s) + } +} + +func (s *Join_clauseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitJoin_clause(s) + } +} + +func (p *SQLiteParser) Join_clause() (localctx IJoin_clauseContext) { + this := p + _ = this + + localctx = NewJoin_clauseContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 88, SQLiteParserRULE_join_clause) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1275) + p.Table_or_subquery() + } + p.SetState(1283) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA || _la == SQLiteParserCROSS_ || ((int64((_la-87)) & ^0x3f) == 0 && ((int64(1)<<(_la-87))&8833) != 0) { + { + p.SetState(1276) + p.Join_operator() + } + { + p.SetState(1277) + p.Table_or_subquery() + } + p.SetState(1279) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 170, p.GetParserRuleContext()) == 1 { + { + p.SetState(1278) + p.Join_constraint() + } + + } + + p.SetState(1285) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + return localctx +} + +// ISelect_coreContext is an interface to support dynamic dispatch. +type ISelect_coreContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetWhereExpr returns the whereExpr rule contexts. + GetWhereExpr() IExprContext + + // Get_expr returns the _expr rule contexts. + Get_expr() IExprContext + + // GetHavingExpr returns the havingExpr rule contexts. + GetHavingExpr() IExprContext + + // SetWhereExpr sets the whereExpr rule contexts. + SetWhereExpr(IExprContext) + + // Set_expr sets the _expr rule contexts. + Set_expr(IExprContext) + + // SetHavingExpr sets the havingExpr rule contexts. + SetHavingExpr(IExprContext) + + // GetGroupByExpr returns the groupByExpr rule context list. + GetGroupByExpr() []IExprContext + + // SetGroupByExpr sets the groupByExpr rule context list. 
+ SetGroupByExpr([]IExprContext) + + // Getter signatures + SELECT_() antlr.TerminalNode + AllResult_column() []IResult_columnContext + Result_column(i int) IResult_columnContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + FROM_() antlr.TerminalNode + WHERE_() antlr.TerminalNode + GROUP_() antlr.TerminalNode + BY_() antlr.TerminalNode + WINDOW_() antlr.TerminalNode + AllWindow_name() []IWindow_nameContext + Window_name(i int) IWindow_nameContext + AllAS_() []antlr.TerminalNode + AS_(i int) antlr.TerminalNode + AllWindow_defn() []IWindow_defnContext + Window_defn(i int) IWindow_defnContext + DISTINCT_() antlr.TerminalNode + ALL_() antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + AllTable_or_subquery() []ITable_or_subqueryContext + Table_or_subquery(i int) ITable_or_subqueryContext + Join_clause() IJoin_clauseContext + HAVING_() antlr.TerminalNode + Values_clause() IValues_clauseContext + + // IsSelect_coreContext differentiates from other interfaces. + IsSelect_coreContext() +} + +type Select_coreContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser + whereExpr IExprContext + _expr IExprContext + groupByExpr []IExprContext + havingExpr IExprContext +} + +func NewEmptySelect_coreContext() *Select_coreContext { + var p = new(Select_coreContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_select_core + return p +} + +func (*Select_coreContext) IsSelect_coreContext() {} + +func NewSelect_coreContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Select_coreContext { + var p = new(Select_coreContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_select_core + + return p +} + +func (s *Select_coreContext) GetParser() antlr.Parser { return s.parser } + +func (s *Select_coreContext) GetWhereExpr() IExprContext { return s.whereExpr } + +func (s *Select_coreContext) Get_expr() IExprContext { return s._expr } + +func (s *Select_coreContext) GetHavingExpr() IExprContext { return s.havingExpr } + +func (s *Select_coreContext) SetWhereExpr(v IExprContext) { s.whereExpr = v } + +func (s *Select_coreContext) Set_expr(v IExprContext) { s._expr = v } + +func (s *Select_coreContext) SetHavingExpr(v IExprContext) { s.havingExpr = v } + +func (s *Select_coreContext) GetGroupByExpr() []IExprContext { return s.groupByExpr } + +func (s *Select_coreContext) SetGroupByExpr(v []IExprContext) { s.groupByExpr = v } + +func (s *Select_coreContext) SELECT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserSELECT_, 0) +} + +func (s *Select_coreContext) AllResult_column() []IResult_columnContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IResult_columnContext); ok { + len++ + } + } + + tst := make([]IResult_columnContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IResult_columnContext); ok { + tst[i] = t.(IResult_columnContext) + i++ + } + } + + return tst +} + +func (s *Select_coreContext) Result_column(i int) IResult_columnContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IResult_columnContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IResult_columnContext) +} + +func (s *Select_coreContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} 
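// All of the generated AllXxx getters above share one pattern: scan
// s.GetChildren() once to count the children that implement the target
// context interface, then scan again to collect them into a slice.
// A minimal hand-written sketch of the same child-filtering idea follows.
// collectExprChildren is a hypothetical helper, not part of the generated
// parser; it relies only on calls already used throughout this file.
func collectExprChildren(s antlr.ParserRuleContext) []IExprContext {
	var out []IExprContext
	for _, ctx := range s.GetChildren() {
		// Keep only the children that implement the target context interface.
		if t, ok := ctx.(IExprContext); ok {
			out = append(out, t)
		}
	}
	return out
}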
+ +func (s *Select_coreContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Select_coreContext) FROM_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFROM_, 0) +} + +func (s *Select_coreContext) WHERE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserWHERE_, 0) +} + +func (s *Select_coreContext) GROUP_() antlr.TerminalNode { + return s.GetToken(SQLiteParserGROUP_, 0) +} + +func (s *Select_coreContext) BY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserBY_, 0) +} + +func (s *Select_coreContext) WINDOW_() antlr.TerminalNode { + return s.GetToken(SQLiteParserWINDOW_, 0) +} + +func (s *Select_coreContext) AllWindow_name() []IWindow_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IWindow_nameContext); ok { + len++ + } + } + + tst := make([]IWindow_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IWindow_nameContext); ok { + tst[i] = t.(IWindow_nameContext) + i++ + } + } + + return tst +} + +func (s *Select_coreContext) Window_name(i int) IWindow_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IWindow_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IWindow_nameContext) +} + +func (s *Select_coreContext) AllAS_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserAS_) +} + +func (s *Select_coreContext) AS_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, i) +} + +func (s *Select_coreContext) AllWindow_defn() []IWindow_defnContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IWindow_defnContext); ok { + len++ + } + } + + tst := make([]IWindow_defnContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IWindow_defnContext); ok { + tst[i] = t.(IWindow_defnContext) + i++ + } + } + + return tst +} + +func (s *Select_coreContext) Window_defn(i int) IWindow_defnContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IWindow_defnContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IWindow_defnContext) +} + +func (s *Select_coreContext) DISTINCT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDISTINCT_, 0) +} + +func (s *Select_coreContext) ALL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserALL_, 0) +} + +func (s *Select_coreContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Select_coreContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Select_coreContext) AllTable_or_subquery() []ITable_or_subqueryContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ITable_or_subqueryContext); ok { + len++ + } + } + + tst := make([]ITable_or_subqueryContext, len) + i := 0 + for _, ctx := range 
children { + if t, ok := ctx.(ITable_or_subqueryContext); ok { + tst[i] = t.(ITable_or_subqueryContext) + i++ + } + } + + return tst +} + +func (s *Select_coreContext) Table_or_subquery(i int) ITable_or_subqueryContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_or_subqueryContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ITable_or_subqueryContext) +} + +func (s *Select_coreContext) Join_clause() IJoin_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IJoin_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IJoin_clauseContext) +} + +func (s *Select_coreContext) HAVING_() antlr.TerminalNode { + return s.GetToken(SQLiteParserHAVING_, 0) +} + +func (s *Select_coreContext) Values_clause() IValues_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IValues_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IValues_clauseContext) +} + +func (s *Select_coreContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Select_coreContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Select_coreContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterSelect_core(s) + } +} + +func (s *Select_coreContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitSelect_core(s) + } +} + +func (p *SQLiteParser) Select_core() (localctx ISelect_coreContext) { + this := p + _ = this + + localctx = NewSelect_coreContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 90, SQLiteParserRULE_select_core) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1349) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserSELECT_: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1286) + p.Match(SQLiteParserSELECT_) + } + p.SetState(1288) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 172, p.GetParserRuleContext()) == 1 { + { + p.SetState(1287) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserALL_ || _la == SQLiteParserDISTINCT_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + } + { + p.SetState(1290) + p.Result_column() + } + p.SetState(1295) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1291) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1292) + p.Result_column() + } + + p.SetState(1297) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + p.SetState(1310) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserFROM_ { + { + p.SetState(1298) + p.Match(SQLiteParserFROM_) + } + p.SetState(1308) + p.GetErrorHandler().Sync(p) + switch 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 175, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1299) + p.Table_or_subquery() + } + p.SetState(1304) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1300) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1301) + p.Table_or_subquery() + } + + p.SetState(1306) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + case 2: + { + p.SetState(1307) + p.Join_clause() + } + + } + + } + p.SetState(1314) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWHERE_ { + { + p.SetState(1312) + p.Match(SQLiteParserWHERE_) + } + { + p.SetState(1313) + + var _x = p.expr(0) + + localctx.(*Select_coreContext).whereExpr = _x + } + + } + p.SetState(1330) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserGROUP_ { + { + p.SetState(1316) + p.Match(SQLiteParserGROUP_) + } + { + p.SetState(1317) + p.Match(SQLiteParserBY_) + } + { + p.SetState(1318) + + var _x = p.expr(0) + + localctx.(*Select_coreContext)._expr = _x + } + localctx.(*Select_coreContext).groupByExpr = append(localctx.(*Select_coreContext).groupByExpr, localctx.(*Select_coreContext)._expr) + p.SetState(1323) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1319) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1320) + + var _x = p.expr(0) + + localctx.(*Select_coreContext)._expr = _x + } + localctx.(*Select_coreContext).groupByExpr = append(localctx.(*Select_coreContext).groupByExpr, localctx.(*Select_coreContext)._expr) + + p.SetState(1325) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + p.SetState(1328) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserHAVING_ { + { + p.SetState(1326) + p.Match(SQLiteParserHAVING_) + } + { + p.SetState(1327) + + var _x = p.expr(0) + + localctx.(*Select_coreContext).havingExpr = _x + } + + } + + } + p.SetState(1346) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWINDOW_ { + { + p.SetState(1332) + p.Match(SQLiteParserWINDOW_) + } + { + p.SetState(1333) + p.Window_name() + } + { + p.SetState(1334) + p.Match(SQLiteParserAS_) + } + { + p.SetState(1335) + p.Window_defn() + } + p.SetState(1343) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1336) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1337) + p.Window_name() + } + { + p.SetState(1338) + p.Match(SQLiteParserAS_) + } + { + p.SetState(1339) + p.Window_defn() + } + + p.SetState(1345) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + } + + case SQLiteParserVALUES_: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(1348) + p.Values_clause() + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + return localctx +} + +// IFactored_select_stmtContext is an interface to support dynamic dispatch. +type IFactored_select_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Select_stmt() ISelect_stmtContext + + // IsFactored_select_stmtContext differentiates from other interfaces. 
+ IsFactored_select_stmtContext() +} + +type Factored_select_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyFactored_select_stmtContext() *Factored_select_stmtContext { + var p = new(Factored_select_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_factored_select_stmt + return p +} + +func (*Factored_select_stmtContext) IsFactored_select_stmtContext() {} + +func NewFactored_select_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Factored_select_stmtContext { + var p = new(Factored_select_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_factored_select_stmt + + return p +} + +func (s *Factored_select_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Factored_select_stmtContext) Select_stmt() ISelect_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISelect_stmtContext) +} + +func (s *Factored_select_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Factored_select_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Factored_select_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterFactored_select_stmt(s) + } +} + +func (s *Factored_select_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitFactored_select_stmt(s) + } +} + +func (p *SQLiteParser) Factored_select_stmt() (localctx IFactored_select_stmtContext) { + this := p + _ = this + + localctx = NewFactored_select_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 92, SQLiteParserRULE_factored_select_stmt) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1351) + p.Select_stmt() + } + + return localctx +} + +// ISimple_select_stmtContext is an interface to support dynamic dispatch. +type ISimple_select_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Select_core() ISelect_coreContext + Common_table_stmt() ICommon_table_stmtContext + Order_by_stmt() IOrder_by_stmtContext + Limit_stmt() ILimit_stmtContext + + // IsSimple_select_stmtContext differentiates from other interfaces. 
+ IsSimple_select_stmtContext() +} + +type Simple_select_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptySimple_select_stmtContext() *Simple_select_stmtContext { + var p = new(Simple_select_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_simple_select_stmt + return p +} + +func (*Simple_select_stmtContext) IsSimple_select_stmtContext() {} + +func NewSimple_select_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Simple_select_stmtContext { + var p = new(Simple_select_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_simple_select_stmt + + return p +} + +func (s *Simple_select_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Simple_select_stmtContext) Select_core() ISelect_coreContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_coreContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISelect_coreContext) +} + +func (s *Simple_select_stmtContext) Common_table_stmt() ICommon_table_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICommon_table_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ICommon_table_stmtContext) +} + +func (s *Simple_select_stmtContext) Order_by_stmt() IOrder_by_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOrder_by_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IOrder_by_stmtContext) +} + +func (s *Simple_select_stmtContext) Limit_stmt() ILimit_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ILimit_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ILimit_stmtContext) +} + +func (s *Simple_select_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Simple_select_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Simple_select_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterSimple_select_stmt(s) + } +} + +func (s *Simple_select_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitSimple_select_stmt(s) + } +} + +func (p *SQLiteParser) Simple_select_stmt() (localctx ISimple_select_stmtContext) { + this := p + _ = this + + localctx = NewSimple_select_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 94, SQLiteParserRULE_simple_select_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(1354) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWITH_ { + { + p.SetState(1353) + p.Common_table_stmt() + } + + } + { + 
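// Any leading WITH common_table_stmt was consumed above; the mandatory
// select_core of simple_select_stmt is parsed next.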
p.SetState(1356) + p.Select_core() + } + p.SetState(1358) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserORDER_ { + { + p.SetState(1357) + p.Order_by_stmt() + } + + } + p.SetState(1361) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserLIMIT_ { + { + p.SetState(1360) + p.Limit_stmt() + } + + } + + return localctx +} + +// ICompound_select_stmtContext is an interface to support dynamic dispatch. +type ICompound_select_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + AllSelect_core() []ISelect_coreContext + Select_core(i int) ISelect_coreContext + Common_table_stmt() ICommon_table_stmtContext + Order_by_stmt() IOrder_by_stmtContext + Limit_stmt() ILimit_stmtContext + AllUNION_() []antlr.TerminalNode + UNION_(i int) antlr.TerminalNode + AllINTERSECT_() []antlr.TerminalNode + INTERSECT_(i int) antlr.TerminalNode + AllEXCEPT_() []antlr.TerminalNode + EXCEPT_(i int) antlr.TerminalNode + AllALL_() []antlr.TerminalNode + ALL_(i int) antlr.TerminalNode + + // IsCompound_select_stmtContext differentiates from other interfaces. + IsCompound_select_stmtContext() +} + +type Compound_select_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyCompound_select_stmtContext() *Compound_select_stmtContext { + var p = new(Compound_select_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_compound_select_stmt + return p +} + +func (*Compound_select_stmtContext) IsCompound_select_stmtContext() {} + +func NewCompound_select_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Compound_select_stmtContext { + var p = new(Compound_select_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_compound_select_stmt + + return p +} + +func (s *Compound_select_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Compound_select_stmtContext) AllSelect_core() []ISelect_coreContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ISelect_coreContext); ok { + len++ + } + } + + tst := make([]ISelect_coreContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ISelect_coreContext); ok { + tst[i] = t.(ISelect_coreContext) + i++ + } + } + + return tst +} + +func (s *Compound_select_stmtContext) Select_core(i int) ISelect_coreContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_coreContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ISelect_coreContext) +} + +func (s *Compound_select_stmtContext) Common_table_stmt() ICommon_table_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICommon_table_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ICommon_table_stmtContext) +} + +func (s *Compound_select_stmtContext) Order_by_stmt() IOrder_by_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOrder_by_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IOrder_by_stmtContext) +} + +func (s 
*Compound_select_stmtContext) Limit_stmt() ILimit_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ILimit_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ILimit_stmtContext) +} + +func (s *Compound_select_stmtContext) AllUNION_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserUNION_) +} + +func (s *Compound_select_stmtContext) UNION_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserUNION_, i) +} + +func (s *Compound_select_stmtContext) AllINTERSECT_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserINTERSECT_) +} + +func (s *Compound_select_stmtContext) INTERSECT_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserINTERSECT_, i) +} + +func (s *Compound_select_stmtContext) AllEXCEPT_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserEXCEPT_) +} + +func (s *Compound_select_stmtContext) EXCEPT_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserEXCEPT_, i) +} + +func (s *Compound_select_stmtContext) AllALL_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserALL_) +} + +func (s *Compound_select_stmtContext) ALL_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserALL_, i) +} + +func (s *Compound_select_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Compound_select_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Compound_select_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterCompound_select_stmt(s) + } +} + +func (s *Compound_select_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitCompound_select_stmt(s) + } +} + +func (p *SQLiteParser) Compound_select_stmt() (localctx ICompound_select_stmtContext) { + this := p + _ = this + + localctx = NewCompound_select_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 96, SQLiteParserRULE_compound_select_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(1364) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWITH_ { + { + p.SetState(1363) + p.Common_table_stmt() + } + + } + { + p.SetState(1366) + p.Select_core() + } + p.SetState(1376) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for ok := true; ok; ok = _la == SQLiteParserEXCEPT_ || _la == SQLiteParserINTERSECT_ || _la == SQLiteParserUNION_ { + p.SetState(1373) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserUNION_: + { + p.SetState(1367) + p.Match(SQLiteParserUNION_) + } + p.SetState(1369) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserALL_ { + { + p.SetState(1368) + p.Match(SQLiteParserALL_) + } + + } + + case SQLiteParserINTERSECT_: + { + p.SetState(1371) + p.Match(SQLiteParserINTERSECT_) + } + + case SQLiteParserEXCEPT_: + { + p.SetState(1372) + p.Match(SQLiteParserEXCEPT_) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } 
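// A compound operator (UNION [ALL], INTERSECT, or EXCEPT) was just matched;
// the select_core operand that must follow it is parsed next, and the
// enclosing loop repeats while another compound operator is in the lookahead.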
+ { + p.SetState(1375) + p.Select_core() + } + + p.SetState(1378) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + p.SetState(1381) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserORDER_ { + { + p.SetState(1380) + p.Order_by_stmt() + } + + } + p.SetState(1384) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserLIMIT_ { + { + p.SetState(1383) + p.Limit_stmt() + } + + } + + return localctx +} + +// ITable_or_subqueryContext is an interface to support dynamic dispatch. +type ITable_or_subqueryContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Table_name() ITable_nameContext + Schema_name() ISchema_nameContext + DOT() antlr.TerminalNode + Table_alias() ITable_aliasContext + INDEXED_() antlr.TerminalNode + BY_() antlr.TerminalNode + Index_name() IIndex_nameContext + NOT_() antlr.TerminalNode + AS_() antlr.TerminalNode + Table_function_name() ITable_function_nameContext + OPEN_PAR() antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + CLOSE_PAR() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + AllTable_or_subquery() []ITable_or_subqueryContext + Table_or_subquery(i int) ITable_or_subqueryContext + Join_clause() IJoin_clauseContext + Select_stmt() ISelect_stmtContext + + // IsTable_or_subqueryContext differentiates from other interfaces. + IsTable_or_subqueryContext() +} + +type Table_or_subqueryContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyTable_or_subqueryContext() *Table_or_subqueryContext { + var p = new(Table_or_subqueryContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_table_or_subquery + return p +} + +func (*Table_or_subqueryContext) IsTable_or_subqueryContext() {} + +func NewTable_or_subqueryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_or_subqueryContext { + var p = new(Table_or_subqueryContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_table_or_subquery + + return p +} + +func (s *Table_or_subqueryContext) GetParser() antlr.Parser { return s.parser } + +func (s *Table_or_subqueryContext) Table_name() ITable_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_nameContext) +} + +func (s *Table_or_subqueryContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *Table_or_subqueryContext) DOT() antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, 0) +} + +func (s *Table_or_subqueryContext) Table_alias() ITable_aliasContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_aliasContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_aliasContext) +} + +func (s *Table_or_subqueryContext) INDEXED_() antlr.TerminalNode { + return s.GetToken(SQLiteParserINDEXED_, 0) +} + +func (s *Table_or_subqueryContext) 
BY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserBY_, 0) +} + +func (s *Table_or_subqueryContext) Index_name() IIndex_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IIndex_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IIndex_nameContext) +} + +func (s *Table_or_subqueryContext) NOT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_, 0) +} + +func (s *Table_or_subqueryContext) AS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, 0) +} + +func (s *Table_or_subqueryContext) Table_function_name() ITable_function_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_function_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_function_nameContext) +} + +func (s *Table_or_subqueryContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Table_or_subqueryContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Table_or_subqueryContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Table_or_subqueryContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Table_or_subqueryContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Table_or_subqueryContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Table_or_subqueryContext) AllTable_or_subquery() []ITable_or_subqueryContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ITable_or_subqueryContext); ok { + len++ + } + } + + tst := make([]ITable_or_subqueryContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ITable_or_subqueryContext); ok { + tst[i] = t.(ITable_or_subqueryContext) + i++ + } + } + + return tst +} + +func (s *Table_or_subqueryContext) Table_or_subquery(i int) ITable_or_subqueryContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_or_subqueryContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ITable_or_subqueryContext) +} + +func (s *Table_or_subqueryContext) Join_clause() IJoin_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IJoin_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IJoin_clauseContext) +} + +func (s *Table_or_subqueryContext) Select_stmt() ISelect_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISelect_stmtContext) +} + +func (s *Table_or_subqueryContext) 
GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Table_or_subqueryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Table_or_subqueryContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterTable_or_subquery(s) + } +} + +func (s *Table_or_subqueryContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitTable_or_subquery(s) + } +} + +func (p *SQLiteParser) Table_or_subquery() (localctx ITable_or_subqueryContext) { + this := p + _ = this + + localctx = NewTable_or_subqueryContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 98, SQLiteParserRULE_table_or_subquery) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1450) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 205, p.GetParserRuleContext()) { + case 1: + p.EnterOuterAlt(localctx, 1) + p.SetState(1389) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 193, p.GetParserRuleContext()) == 1 { + { + p.SetState(1386) + p.Schema_name() + } + { + p.SetState(1387) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(1391) + p.Table_name() + } + p.SetState(1396) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 195, p.GetParserRuleContext()) == 1 { + p.SetState(1393) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 194, p.GetParserRuleContext()) == 1 { + { + p.SetState(1392) + p.Match(SQLiteParserAS_) + } + + } + { + p.SetState(1395) + p.Table_alias() + } + + } + p.SetState(1403) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserINDEXED_: + { + p.SetState(1398) + p.Match(SQLiteParserINDEXED_) + } + { + p.SetState(1399) + p.Match(SQLiteParserBY_) + } + { + p.SetState(1400) + p.Index_name() + } + + case SQLiteParserNOT_: + { + p.SetState(1401) + p.Match(SQLiteParserNOT_) + } + { + p.SetState(1402) + p.Match(SQLiteParserINDEXED_) + } + + case SQLiteParserEOF, SQLiteParserSCOL, SQLiteParserCLOSE_PAR, SQLiteParserCOMMA, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserATTACH_, SQLiteParserBEGIN_, SQLiteParserCOMMIT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserDELETE_, SQLiteParserDETACH_, SQLiteParserDROP_, SQLiteParserEND_, SQLiteParserEXCEPT_, SQLiteParserEXPLAIN_, SQLiteParserGROUP_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINTERSECT_, SQLiteParserJOIN_, SQLiteParserLEFT_, SQLiteParserLIMIT_, SQLiteParserNATURAL_, SQLiteParserON_, SQLiteParserORDER_, SQLiteParserPRAGMA_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserREPLACE_, SQLiteParserRETURNING_, SQLiteParserROLLBACK_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserUNION_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWINDOW_: + + default: + } + + case 2: + p.EnterOuterAlt(localctx, 2) + p.SetState(1408) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 197, 
p.GetParserRuleContext()) == 1 { + { + p.SetState(1405) + p.Schema_name() + } + { + p.SetState(1406) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(1410) + p.Table_function_name() + } + { + p.SetState(1411) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1412) + p.expr(0) + } + p.SetState(1417) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1413) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1414) + p.expr(0) + } + + p.SetState(1419) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(1420) + p.Match(SQLiteParserCLOSE_PAR) + } + p.SetState(1425) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 200, p.GetParserRuleContext()) == 1 { + p.SetState(1422) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 199, p.GetParserRuleContext()) == 1 { + { + p.SetState(1421) + p.Match(SQLiteParserAS_) + } + + } + { + p.SetState(1424) + p.Table_alias() + } + + } + + case 3: + p.EnterOuterAlt(localctx, 3) + { + p.SetState(1427) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1437) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 202, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1428) + p.Table_or_subquery() + } + p.SetState(1433) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1429) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1430) + p.Table_or_subquery() + } + + p.SetState(1435) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + case 2: + { + p.SetState(1436) + p.Join_clause() + } + + } + { + p.SetState(1439) + p.Match(SQLiteParserCLOSE_PAR) + } + + case 4: + p.EnterOuterAlt(localctx, 4) + { + p.SetState(1441) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1442) + p.Select_stmt() + } + { + p.SetState(1443) + p.Match(SQLiteParserCLOSE_PAR) + } + p.SetState(1448) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 204, p.GetParserRuleContext()) == 1 { + p.SetState(1445) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 203, p.GetParserRuleContext()) == 1 { + { + p.SetState(1444) + p.Match(SQLiteParserAS_) + } + + } + { + p.SetState(1447) + p.Table_alias() + } + + } + + } + + return localctx +} + +// IResult_columnContext is an interface to support dynamic dispatch. +type IResult_columnContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + STAR() antlr.TerminalNode + Table_name() ITable_nameContext + DOT() antlr.TerminalNode + Expr() IExprContext + Column_alias() IColumn_aliasContext + AS_() antlr.TerminalNode + + // IsResult_columnContext differentiates from other interfaces. 
+ IsResult_columnContext() +} + +type Result_columnContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyResult_columnContext() *Result_columnContext { + var p = new(Result_columnContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_result_column + return p +} + +func (*Result_columnContext) IsResult_columnContext() {} + +func NewResult_columnContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Result_columnContext { + var p = new(Result_columnContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_result_column + + return p +} + +func (s *Result_columnContext) GetParser() antlr.Parser { return s.parser } + +func (s *Result_columnContext) STAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserSTAR, 0) +} + +func (s *Result_columnContext) Table_name() ITable_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_nameContext) +} + +func (s *Result_columnContext) DOT() antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, 0) +} + +func (s *Result_columnContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Result_columnContext) Column_alias() IColumn_aliasContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_aliasContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IColumn_aliasContext) +} + +func (s *Result_columnContext) AS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, 0) +} + +func (s *Result_columnContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Result_columnContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Result_columnContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterResult_column(s) + } +} + +func (s *Result_columnContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitResult_column(s) + } +} + +func (p *SQLiteParser) Result_column() (localctx IResult_columnContext) { + this := p + _ = this + + localctx = NewResult_columnContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 100, SQLiteParserRULE_result_column) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1464) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 208, p.GetParserRuleContext()) { + case 1: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1452) + p.Match(SQLiteParserSTAR) + } + + case 2: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(1453) + p.Table_name() + } + { + p.SetState(1454) + p.Match(SQLiteParserDOT) + } + { + 
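// Alternative 2 of result_column is table_name '.' '*': the table name and
// the dot were matched above, and the trailing star is matched below.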
p.SetState(1455) + p.Match(SQLiteParserSTAR) + } + + case 3: + p.EnterOuterAlt(localctx, 3) + { + p.SetState(1457) + p.expr(0) + } + p.SetState(1462) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserAS_ || _la == SQLiteParserIDENTIFIER || _la == SQLiteParserSTRING_LITERAL { + p.SetState(1459) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserAS_ { + { + p.SetState(1458) + p.Match(SQLiteParserAS_) + } + + } + { + p.SetState(1461) + p.Column_alias() + } + + } + + } + + return localctx +} + +// IJoin_operatorContext is an interface to support dynamic dispatch. +type IJoin_operatorContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + COMMA() antlr.TerminalNode + JOIN_() antlr.TerminalNode + NATURAL_() antlr.TerminalNode + LEFT_() antlr.TerminalNode + INNER_() antlr.TerminalNode + CROSS_() antlr.TerminalNode + OUTER_() antlr.TerminalNode + + // IsJoin_operatorContext differentiates from other interfaces. + IsJoin_operatorContext() +} + +type Join_operatorContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyJoin_operatorContext() *Join_operatorContext { + var p = new(Join_operatorContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_join_operator + return p +} + +func (*Join_operatorContext) IsJoin_operatorContext() {} + +func NewJoin_operatorContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Join_operatorContext { + var p = new(Join_operatorContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_join_operator + + return p +} + +func (s *Join_operatorContext) GetParser() antlr.Parser { return s.parser } + +func (s *Join_operatorContext) COMMA() antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, 0) +} + +func (s *Join_operatorContext) JOIN_() antlr.TerminalNode { + return s.GetToken(SQLiteParserJOIN_, 0) +} + +func (s *Join_operatorContext) NATURAL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNATURAL_, 0) +} + +func (s *Join_operatorContext) LEFT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserLEFT_, 0) +} + +func (s *Join_operatorContext) INNER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserINNER_, 0) +} + +func (s *Join_operatorContext) CROSS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCROSS_, 0) +} + +func (s *Join_operatorContext) OUTER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserOUTER_, 0) +} + +func (s *Join_operatorContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Join_operatorContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Join_operatorContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterJoin_operator(s) + } +} + +func (s *Join_operatorContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitJoin_operator(s) + } +} + +func (p *SQLiteParser) Join_operator() (localctx IJoin_operatorContext) { + this := p + _ = this + + localctx = NewJoin_operatorContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 102, SQLiteParserRULE_join_operator) + var _la int + + defer func() { + 
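// Deferred so ExitRule still runs if the rule body below panics during
// error recovery; every generated rule method in this file uses the same
// ExitRule/recover pair of deferred functions.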
p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1479) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserCOMMA: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1466) + p.Match(SQLiteParserCOMMA) + } + + case SQLiteParserCROSS_, SQLiteParserINNER_, SQLiteParserJOIN_, SQLiteParserLEFT_, SQLiteParserNATURAL_: + p.EnterOuterAlt(localctx, 2) + p.SetState(1468) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserNATURAL_ { + { + p.SetState(1467) + p.Match(SQLiteParserNATURAL_) + } + + } + p.SetState(1476) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserLEFT_: + { + p.SetState(1470) + p.Match(SQLiteParserLEFT_) + } + p.SetState(1472) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserOUTER_ { + { + p.SetState(1471) + p.Match(SQLiteParserOUTER_) + } + + } + + case SQLiteParserINNER_: + { + p.SetState(1474) + p.Match(SQLiteParserINNER_) + } + + case SQLiteParserCROSS_: + { + p.SetState(1475) + p.Match(SQLiteParserCROSS_) + } + + case SQLiteParserJOIN_: + + default: + } + { + p.SetState(1478) + p.Match(SQLiteParserJOIN_) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + return localctx +} + +// IJoin_constraintContext is an interface to support dynamic dispatch. +type IJoin_constraintContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + ON_() antlr.TerminalNode + Expr() IExprContext + USING_() antlr.TerminalNode + OPEN_PAR() antlr.TerminalNode + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + CLOSE_PAR() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsJoin_constraintContext differentiates from other interfaces. 
+ IsJoin_constraintContext() +} + +type Join_constraintContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyJoin_constraintContext() *Join_constraintContext { + var p = new(Join_constraintContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_join_constraint + return p +} + +func (*Join_constraintContext) IsJoin_constraintContext() {} + +func NewJoin_constraintContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Join_constraintContext { + var p = new(Join_constraintContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_join_constraint + + return p +} + +func (s *Join_constraintContext) GetParser() antlr.Parser { return s.parser } + +func (s *Join_constraintContext) ON_() antlr.TerminalNode { + return s.GetToken(SQLiteParserON_, 0) +} + +func (s *Join_constraintContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Join_constraintContext) USING_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUSING_, 0) +} + +func (s *Join_constraintContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Join_constraintContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Join_constraintContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Join_constraintContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Join_constraintContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Join_constraintContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Join_constraintContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Join_constraintContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Join_constraintContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterJoin_constraint(s) + } +} + +func (s *Join_constraintContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitJoin_constraint(s) + } +} + +func (p *SQLiteParser) Join_constraint() (localctx IJoin_constraintContext) { + this := p + _ = this + + localctx = NewJoin_constraintContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 104, SQLiteParserRULE_join_constraint) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := 
err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1495) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserON_: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1481) + p.Match(SQLiteParserON_) + } + { + p.SetState(1482) + p.expr(0) + } + + case SQLiteParserUSING_: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(1483) + p.Match(SQLiteParserUSING_) + } + { + p.SetState(1484) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1485) + p.Column_name() + } + p.SetState(1490) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1486) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1487) + p.Column_name() + } + + p.SetState(1492) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(1493) + p.Match(SQLiteParserCLOSE_PAR) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + return localctx +} + +// ICompound_operatorContext is an interface to support dynamic dispatch. +type ICompound_operatorContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + UNION_() antlr.TerminalNode + ALL_() antlr.TerminalNode + INTERSECT_() antlr.TerminalNode + EXCEPT_() antlr.TerminalNode + + // IsCompound_operatorContext differentiates from other interfaces. + IsCompound_operatorContext() +} + +type Compound_operatorContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyCompound_operatorContext() *Compound_operatorContext { + var p = new(Compound_operatorContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_compound_operator + return p +} + +func (*Compound_operatorContext) IsCompound_operatorContext() {} + +func NewCompound_operatorContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Compound_operatorContext { + var p = new(Compound_operatorContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_compound_operator + + return p +} + +func (s *Compound_operatorContext) GetParser() antlr.Parser { return s.parser } + +func (s *Compound_operatorContext) UNION_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUNION_, 0) +} + +func (s *Compound_operatorContext) ALL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserALL_, 0) +} + +func (s *Compound_operatorContext) INTERSECT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserINTERSECT_, 0) +} + +func (s *Compound_operatorContext) EXCEPT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserEXCEPT_, 0) +} + +func (s *Compound_operatorContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Compound_operatorContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Compound_operatorContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterCompound_operator(s) + } +} + +func (s *Compound_operatorContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitCompound_operator(s) + } +} + +func (p *SQLiteParser) Compound_operator() 
(localctx ICompound_operatorContext) { + this := p + _ = this + + localctx = NewCompound_operatorContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 106, SQLiteParserRULE_compound_operator) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1503) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserUNION_: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1497) + p.Match(SQLiteParserUNION_) + } + p.SetState(1499) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserALL_ { + { + p.SetState(1498) + p.Match(SQLiteParserALL_) + } + + } + + case SQLiteParserINTERSECT_: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(1501) + p.Match(SQLiteParserINTERSECT_) + } + + case SQLiteParserEXCEPT_: + p.EnterOuterAlt(localctx, 3) + { + p.SetState(1502) + p.Match(SQLiteParserEXCEPT_) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + return localctx +} + +// IUpdate_stmtContext is an interface to support dynamic dispatch. +type IUpdate_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + UPDATE_() antlr.TerminalNode + Qualified_table_name() IQualified_table_nameContext + SET_() antlr.TerminalNode + AllASSIGN() []antlr.TerminalNode + ASSIGN(i int) antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + AllColumn_name_list() []IColumn_name_listContext + Column_name_list(i int) IColumn_name_listContext + With_clause() IWith_clauseContext + OR_() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + FROM_() antlr.TerminalNode + WHERE_() antlr.TerminalNode + Returning_clause() IReturning_clauseContext + ROLLBACK_() antlr.TerminalNode + ABORT_() antlr.TerminalNode + REPLACE_() antlr.TerminalNode + FAIL_() antlr.TerminalNode + IGNORE_() antlr.TerminalNode + AllTable_or_subquery() []ITable_or_subqueryContext + Table_or_subquery(i int) ITable_or_subqueryContext + Join_clause() IJoin_clauseContext + + // IsUpdate_stmtContext differentiates from other interfaces. 
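+	// Editorial annotation (added in review; not part of the ANTLR generator
+	// output): per the Update_stmt method below, the recognized shape is roughly
+	//   [WITH ...] UPDATE [OR ROLLBACK|ABORT|REPLACE|FAIL|IGNORE] qualified_table_name
+	//     SET assignment [, ...] [FROM ...] [WHERE expr] [returning_clause]
+	// Illustrative SQL: UPDATE OR IGNORE t SET a = 1, b = 2 WHERE id = 5 RETURNING id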
+ IsUpdate_stmtContext() +} + +type Update_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyUpdate_stmtContext() *Update_stmtContext { + var p = new(Update_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_update_stmt + return p +} + +func (*Update_stmtContext) IsUpdate_stmtContext() {} + +func NewUpdate_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Update_stmtContext { + var p = new(Update_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_update_stmt + + return p +} + +func (s *Update_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Update_stmtContext) UPDATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUPDATE_, 0) +} + +func (s *Update_stmtContext) Qualified_table_name() IQualified_table_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IQualified_table_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IQualified_table_nameContext) +} + +func (s *Update_stmtContext) SET_() antlr.TerminalNode { + return s.GetToken(SQLiteParserSET_, 0) +} + +func (s *Update_stmtContext) AllASSIGN() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserASSIGN) +} + +func (s *Update_stmtContext) ASSIGN(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserASSIGN, i) +} + +func (s *Update_stmtContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Update_stmtContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Update_stmtContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Update_stmtContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Update_stmtContext) AllColumn_name_list() []IColumn_name_listContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_name_listContext); ok { + len++ + } + } + + tst := make([]IColumn_name_listContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_name_listContext); ok { + tst[i] = t.(IColumn_name_listContext) + i++ + } + } + + return tst +} + +func (s *Update_stmtContext) Column_name_list(i int) IColumn_name_listContext { + var t antlr.RuleContext + j := 0 + for 
_, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_name_listContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_name_listContext) +} + +func (s *Update_stmtContext) With_clause() IWith_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IWith_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IWith_clauseContext) +} + +func (s *Update_stmtContext) OR_() antlr.TerminalNode { + return s.GetToken(SQLiteParserOR_, 0) +} + +func (s *Update_stmtContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Update_stmtContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Update_stmtContext) FROM_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFROM_, 0) +} + +func (s *Update_stmtContext) WHERE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserWHERE_, 0) +} + +func (s *Update_stmtContext) Returning_clause() IReturning_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IReturning_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IReturning_clauseContext) +} + +func (s *Update_stmtContext) ROLLBACK_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROLLBACK_, 0) +} + +func (s *Update_stmtContext) ABORT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserABORT_, 0) +} + +func (s *Update_stmtContext) REPLACE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserREPLACE_, 0) +} + +func (s *Update_stmtContext) FAIL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFAIL_, 0) +} + +func (s *Update_stmtContext) IGNORE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIGNORE_, 0) +} + +func (s *Update_stmtContext) AllTable_or_subquery() []ITable_or_subqueryContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ITable_or_subqueryContext); ok { + len++ + } + } + + tst := make([]ITable_or_subqueryContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ITable_or_subqueryContext); ok { + tst[i] = t.(ITable_or_subqueryContext) + i++ + } + } + + return tst +} + +func (s *Update_stmtContext) Table_or_subquery(i int) ITable_or_subqueryContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_or_subqueryContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ITable_or_subqueryContext) +} + +func (s *Update_stmtContext) Join_clause() IJoin_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IJoin_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IJoin_clauseContext) +} + +func (s *Update_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Update_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Update_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterUpdate_stmt(s) + } +} + +func (s *Update_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := 
listener.(SQLiteParserListener); ok { + listenerT.ExitUpdate_stmt(s) + } +} + +func (p *SQLiteParser) Update_stmt() (localctx IUpdate_stmtContext) { + this := p + _ = this + + localctx = NewUpdate_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 108, SQLiteParserRULE_update_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(1506) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWITH_ { + { + p.SetState(1505) + p.With_clause() + } + + } + { + p.SetState(1508) + p.Match(SQLiteParserUPDATE_) + } + p.SetState(1511) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 218, p.GetParserRuleContext()) == 1 { + { + p.SetState(1509) + p.Match(SQLiteParserOR_) + } + { + p.SetState(1510) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserABORT_ || ((int64((_la-72)) & ^0x3f) == 0 && ((int64(1)<<(_la-72))&19140298416325121) != 0)) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + } + { + p.SetState(1513) + p.Qualified_table_name() + } + { + p.SetState(1514) + p.Match(SQLiteParserSET_) + } + p.SetState(1517) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 219, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1515) + p.Column_name() + } + + case 2: + { + p.SetState(1516) + p.Column_name_list() + } + + } + { + p.SetState(1519) + p.Match(SQLiteParserASSIGN) + } + { + p.SetState(1520) + p.expr(0) + } + p.SetState(1531) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1521) + p.Match(SQLiteParserCOMMA) + } + p.SetState(1524) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 220, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1522) + p.Column_name() + } + + case 2: + { + p.SetState(1523) + p.Column_name_list() + } + + } + { + p.SetState(1526) + p.Match(SQLiteParserASSIGN) + } + { + p.SetState(1527) + p.expr(0) + } + + p.SetState(1533) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + p.SetState(1546) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserFROM_ { + { + p.SetState(1534) + p.Match(SQLiteParserFROM_) + } + p.SetState(1544) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 223, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1535) + p.Table_or_subquery() + } + p.SetState(1540) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1536) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1537) + p.Table_or_subquery() + } + + p.SetState(1542) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + case 2: + { + p.SetState(1543) + p.Join_clause() + } + + } + + } + p.SetState(1550) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWHERE_ { + { + p.SetState(1548) + p.Match(SQLiteParserWHERE_) + } + { + p.SetState(1549) + p.expr(0) + } + + } + p.SetState(1553) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == 
SQLiteParserRETURNING_ { + { + p.SetState(1552) + p.Returning_clause() + } + + } + + return localctx +} + +// IColumn_name_listContext is an interface to support dynamic dispatch. +type IColumn_name_listContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + OPEN_PAR() antlr.TerminalNode + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + CLOSE_PAR() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsColumn_name_listContext differentiates from other interfaces. + IsColumn_name_listContext() +} + +type Column_name_listContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyColumn_name_listContext() *Column_name_listContext { + var p = new(Column_name_listContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_column_name_list + return p +} + +func (*Column_name_listContext) IsColumn_name_listContext() {} + +func NewColumn_name_listContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Column_name_listContext { + var p = new(Column_name_listContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_column_name_list + + return p +} + +func (s *Column_name_listContext) GetParser() antlr.Parser { return s.parser } + +func (s *Column_name_listContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Column_name_listContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Column_name_listContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Column_name_listContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Column_name_listContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Column_name_listContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Column_name_listContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Column_name_listContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Column_name_listContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterColumn_name_list(s) + } +} + +func (s *Column_name_listContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitColumn_name_list(s) + } +} + +func (p *SQLiteParser) Column_name_list() (localctx IColumn_name_listContext) { + this := p + _ = this + + localctx = NewColumn_name_listContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 110, 
SQLiteParserRULE_column_name_list) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1555) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1556) + p.Column_name() + } + p.SetState(1561) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1557) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1558) + p.Column_name() + } + + p.SetState(1563) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(1564) + p.Match(SQLiteParserCLOSE_PAR) + } + + return localctx +} + +// IUpdate_stmt_limitedContext is an interface to support dynamic dispatch. +type IUpdate_stmt_limitedContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + UPDATE_() antlr.TerminalNode + Qualified_table_name() IQualified_table_nameContext + SET_() antlr.TerminalNode + AllASSIGN() []antlr.TerminalNode + ASSIGN(i int) antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + AllColumn_name() []IColumn_nameContext + Column_name(i int) IColumn_nameContext + AllColumn_name_list() []IColumn_name_listContext + Column_name_list(i int) IColumn_name_listContext + With_clause() IWith_clauseContext + OR_() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + WHERE_() antlr.TerminalNode + Returning_clause() IReturning_clauseContext + Limit_stmt() ILimit_stmtContext + ROLLBACK_() antlr.TerminalNode + ABORT_() antlr.TerminalNode + REPLACE_() antlr.TerminalNode + FAIL_() antlr.TerminalNode + IGNORE_() antlr.TerminalNode + Order_by_stmt() IOrder_by_stmtContext + + // IsUpdate_stmt_limitedContext differentiates from other interfaces. 
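+	// Editorial annotation (added in review; not part of the ANTLR generator
+	// output): this "limited" variant additionally accepts an optional
+	// [ORDER BY ...] LIMIT ... tail after the WHERE/RETURNING clauses, e.g.
+	//   UPDATE t SET a = a + 1 WHERE b > 0 ORDER BY b LIMIT 10
+	// (assumption worth verifying: SQLite itself only parses this form when
+	// compiled with SQLITE_ENABLE_UPDATE_DELETE_LIMIT)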
+ IsUpdate_stmt_limitedContext() +} + +type Update_stmt_limitedContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyUpdate_stmt_limitedContext() *Update_stmt_limitedContext { + var p = new(Update_stmt_limitedContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_update_stmt_limited + return p +} + +func (*Update_stmt_limitedContext) IsUpdate_stmt_limitedContext() {} + +func NewUpdate_stmt_limitedContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Update_stmt_limitedContext { + var p = new(Update_stmt_limitedContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_update_stmt_limited + + return p +} + +func (s *Update_stmt_limitedContext) GetParser() antlr.Parser { return s.parser } + +func (s *Update_stmt_limitedContext) UPDATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUPDATE_, 0) +} + +func (s *Update_stmt_limitedContext) Qualified_table_name() IQualified_table_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IQualified_table_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IQualified_table_nameContext) +} + +func (s *Update_stmt_limitedContext) SET_() antlr.TerminalNode { + return s.GetToken(SQLiteParserSET_, 0) +} + +func (s *Update_stmt_limitedContext) AllASSIGN() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserASSIGN) +} + +func (s *Update_stmt_limitedContext) ASSIGN(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserASSIGN, i) +} + +func (s *Update_stmt_limitedContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Update_stmt_limitedContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Update_stmt_limitedContext) AllColumn_name() []IColumn_nameContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_nameContext); ok { + len++ + } + } + + tst := make([]IColumn_nameContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_nameContext); ok { + tst[i] = t.(IColumn_nameContext) + i++ + } + } + + return tst +} + +func (s *Update_stmt_limitedContext) Column_name(i int) IColumn_nameContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_nameContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_nameContext) +} + +func (s *Update_stmt_limitedContext) AllColumn_name_list() []IColumn_name_listContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IColumn_name_listContext); ok { + len++ + } + } + + tst := make([]IColumn_name_listContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IColumn_name_listContext); ok { + tst[i] = 
t.(IColumn_name_listContext) + i++ + } + } + + return tst +} + +func (s *Update_stmt_limitedContext) Column_name_list(i int) IColumn_name_listContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_name_listContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IColumn_name_listContext) +} + +func (s *Update_stmt_limitedContext) With_clause() IWith_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IWith_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IWith_clauseContext) +} + +func (s *Update_stmt_limitedContext) OR_() antlr.TerminalNode { + return s.GetToken(SQLiteParserOR_, 0) +} + +func (s *Update_stmt_limitedContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Update_stmt_limitedContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Update_stmt_limitedContext) WHERE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserWHERE_, 0) +} + +func (s *Update_stmt_limitedContext) Returning_clause() IReturning_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IReturning_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IReturning_clauseContext) +} + +func (s *Update_stmt_limitedContext) Limit_stmt() ILimit_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ILimit_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ILimit_stmtContext) +} + +func (s *Update_stmt_limitedContext) ROLLBACK_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROLLBACK_, 0) +} + +func (s *Update_stmt_limitedContext) ABORT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserABORT_, 0) +} + +func (s *Update_stmt_limitedContext) REPLACE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserREPLACE_, 0) +} + +func (s *Update_stmt_limitedContext) FAIL_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFAIL_, 0) +} + +func (s *Update_stmt_limitedContext) IGNORE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserIGNORE_, 0) +} + +func (s *Update_stmt_limitedContext) Order_by_stmt() IOrder_by_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOrder_by_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IOrder_by_stmtContext) +} + +func (s *Update_stmt_limitedContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Update_stmt_limitedContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Update_stmt_limitedContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterUpdate_stmt_limited(s) + } +} + +func (s *Update_stmt_limitedContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitUpdate_stmt_limited(s) + } +} + +func (p *SQLiteParser) Update_stmt_limited() (localctx IUpdate_stmt_limitedContext) { + this := p + _ = this + + localctx = NewUpdate_stmt_limitedContext(p, p.GetParserRuleContext(), 
p.GetState()) + p.EnterRule(localctx, 112, SQLiteParserRULE_update_stmt_limited) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(1567) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWITH_ { + { + p.SetState(1566) + p.With_clause() + } + + } + { + p.SetState(1569) + p.Match(SQLiteParserUPDATE_) + } + p.SetState(1572) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 229, p.GetParserRuleContext()) == 1 { + { + p.SetState(1570) + p.Match(SQLiteParserOR_) + } + { + p.SetState(1571) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserABORT_ || ((int64((_la-72)) & ^0x3f) == 0 && ((int64(1)<<(_la-72))&19140298416325121) != 0)) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + } + { + p.SetState(1574) + p.Qualified_table_name() + } + { + p.SetState(1575) + p.Match(SQLiteParserSET_) + } + p.SetState(1578) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 230, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1576) + p.Column_name() + } + + case 2: + { + p.SetState(1577) + p.Column_name_list() + } + + } + { + p.SetState(1580) + p.Match(SQLiteParserASSIGN) + } + { + p.SetState(1581) + p.expr(0) + } + p.SetState(1592) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1582) + p.Match(SQLiteParserCOMMA) + } + p.SetState(1585) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 231, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1583) + p.Column_name() + } + + case 2: + { + p.SetState(1584) + p.Column_name_list() + } + + } + { + p.SetState(1587) + p.Match(SQLiteParserASSIGN) + } + { + p.SetState(1588) + p.expr(0) + } + + p.SetState(1594) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + p.SetState(1597) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserWHERE_ { + { + p.SetState(1595) + p.Match(SQLiteParserWHERE_) + } + { + p.SetState(1596) + p.expr(0) + } + + } + p.SetState(1600) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserRETURNING_ { + { + p.SetState(1599) + p.Returning_clause() + } + + } + p.SetState(1606) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserLIMIT_ || _la == SQLiteParserORDER_ { + p.SetState(1603) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserORDER_ { + { + p.SetState(1602) + p.Order_by_stmt() + } + + } + { + p.SetState(1605) + p.Limit_stmt() + } + + } + + return localctx +} + +// IQualified_table_nameContext is an interface to support dynamic dispatch. +type IQualified_table_nameContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
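+	// Editorial annotation (added in review; not part of the ANTLR generator
+	// output): per the Qualified_table_name method below, the shape is
+	//   [schema_name .] table_name [AS alias] [INDEXED BY index_name | NOT INDEXED]
+	// Illustrative SQL: UPDATE main.users AS u INDEXED BY idx_users_name SET ...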
+ GetParser() antlr.Parser + + // Getter signatures + Table_name() ITable_nameContext + Schema_name() ISchema_nameContext + DOT() antlr.TerminalNode + AS_() antlr.TerminalNode + Alias() IAliasContext + INDEXED_() antlr.TerminalNode + BY_() antlr.TerminalNode + Index_name() IIndex_nameContext + NOT_() antlr.TerminalNode + + // IsQualified_table_nameContext differentiates from other interfaces. + IsQualified_table_nameContext() +} + +type Qualified_table_nameContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyQualified_table_nameContext() *Qualified_table_nameContext { + var p = new(Qualified_table_nameContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_qualified_table_name + return p +} + +func (*Qualified_table_nameContext) IsQualified_table_nameContext() {} + +func NewQualified_table_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Qualified_table_nameContext { + var p = new(Qualified_table_nameContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_qualified_table_name + + return p +} + +func (s *Qualified_table_nameContext) GetParser() antlr.Parser { return s.parser } + +func (s *Qualified_table_nameContext) Table_name() ITable_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ITable_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ITable_nameContext) +} + +func (s *Qualified_table_nameContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *Qualified_table_nameContext) DOT() antlr.TerminalNode { + return s.GetToken(SQLiteParserDOT, 0) +} + +func (s *Qualified_table_nameContext) AS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAS_, 0) +} + +func (s *Qualified_table_nameContext) Alias() IAliasContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAliasContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAliasContext) +} + +func (s *Qualified_table_nameContext) INDEXED_() antlr.TerminalNode { + return s.GetToken(SQLiteParserINDEXED_, 0) +} + +func (s *Qualified_table_nameContext) BY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserBY_, 0) +} + +func (s *Qualified_table_nameContext) Index_name() IIndex_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IIndex_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IIndex_nameContext) +} + +func (s *Qualified_table_nameContext) NOT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_, 0) +} + +func (s *Qualified_table_nameContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Qualified_table_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Qualified_table_nameContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterQualified_table_name(s) + } +} + +func (s 
*Qualified_table_nameContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitQualified_table_name(s) + } +} + +func (p *SQLiteParser) Qualified_table_name() (localctx IQualified_table_nameContext) { + this := p + _ = this + + localctx = NewQualified_table_nameContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 114, SQLiteParserRULE_qualified_table_name) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + p.SetState(1611) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 237, p.GetParserRuleContext()) == 1 { + { + p.SetState(1608) + p.Schema_name() + } + { + p.SetState(1609) + p.Match(SQLiteParserDOT) + } + + } + { + p.SetState(1613) + p.Table_name() + } + p.SetState(1616) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserAS_ { + { + p.SetState(1614) + p.Match(SQLiteParserAS_) + } + { + p.SetState(1615) + p.Alias() + } + + } + p.SetState(1623) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserINDEXED_: + { + p.SetState(1618) + p.Match(SQLiteParserINDEXED_) + } + { + p.SetState(1619) + p.Match(SQLiteParserBY_) + } + { + p.SetState(1620) + p.Index_name() + } + + case SQLiteParserNOT_: + { + p.SetState(1621) + p.Match(SQLiteParserNOT_) + } + { + p.SetState(1622) + p.Match(SQLiteParserINDEXED_) + } + + case SQLiteParserEOF, SQLiteParserSCOL, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserATTACH_, SQLiteParserBEGIN_, SQLiteParserCOMMIT_, SQLiteParserCREATE_, SQLiteParserDELETE_, SQLiteParserDETACH_, SQLiteParserDROP_, SQLiteParserEND_, SQLiteParserEXPLAIN_, SQLiteParserINSERT_, SQLiteParserLIMIT_, SQLiteParserORDER_, SQLiteParserPRAGMA_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserREPLACE_, SQLiteParserRETURNING_, SQLiteParserROLLBACK_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserUPDATE_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserWHERE_, SQLiteParserWITH_: + + default: + } + + return localctx +} + +// IVacuum_stmtContext is an interface to support dynamic dispatch. +type IVacuum_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + VACUUM_() antlr.TerminalNode + Schema_name() ISchema_nameContext + INTO_() antlr.TerminalNode + Filename() IFilenameContext + + // IsVacuum_stmtContext differentiates from other interfaces. 
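+	// Editorial annotation (added in review; not part of the ANTLR generator
+	// output): per the Vacuum_stmt method below, the shape is
+	//   VACUUM [schema_name] [INTO filename]
+	// Illustrative SQL: VACUUM main INTO '/tmp/backup.db'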
+ IsVacuum_stmtContext() +} + +type Vacuum_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyVacuum_stmtContext() *Vacuum_stmtContext { + var p = new(Vacuum_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_vacuum_stmt + return p +} + +func (*Vacuum_stmtContext) IsVacuum_stmtContext() {} + +func NewVacuum_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Vacuum_stmtContext { + var p = new(Vacuum_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_vacuum_stmt + + return p +} + +func (s *Vacuum_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Vacuum_stmtContext) VACUUM_() antlr.TerminalNode { + return s.GetToken(SQLiteParserVACUUM_, 0) +} + +func (s *Vacuum_stmtContext) Schema_name() ISchema_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISchema_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISchema_nameContext) +} + +func (s *Vacuum_stmtContext) INTO_() antlr.TerminalNode { + return s.GetToken(SQLiteParserINTO_, 0) +} + +func (s *Vacuum_stmtContext) Filename() IFilenameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFilenameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFilenameContext) +} + +func (s *Vacuum_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Vacuum_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Vacuum_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterVacuum_stmt(s) + } +} + +func (s *Vacuum_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitVacuum_stmt(s) + } +} + +func (p *SQLiteParser) Vacuum_stmt() (localctx IVacuum_stmtContext) { + this := p + _ = this + + localctx = NewVacuum_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 116, SQLiteParserRULE_vacuum_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1625) + p.Match(SQLiteParserVACUUM_) + } + p.SetState(1627) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 240, p.GetParserRuleContext()) == 1 { + { + p.SetState(1626) + p.Schema_name() + } + + } + p.SetState(1631) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserINTO_ { + { + p.SetState(1629) + p.Match(SQLiteParserINTO_) + } + { + p.SetState(1630) + p.Filename() + } + + } + + return localctx +} + +// IFilter_clauseContext is an interface to support dynamic dispatch. +type IFilter_clauseContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
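+	// Editorial annotation (added in review; not part of the ANTLR generator
+	// output): this rule matches the aggregate FILTER clause,
+	//   FILTER ( WHERE expr )
+	// Illustrative SQL: count(*) FILTER (WHERE status = 'ok')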
+ GetParser() antlr.Parser + + // Getter signatures + FILTER_() antlr.TerminalNode + OPEN_PAR() antlr.TerminalNode + WHERE_() antlr.TerminalNode + Expr() IExprContext + CLOSE_PAR() antlr.TerminalNode + + // IsFilter_clauseContext differentiates from other interfaces. + IsFilter_clauseContext() +} + +type Filter_clauseContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyFilter_clauseContext() *Filter_clauseContext { + var p = new(Filter_clauseContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_filter_clause + return p +} + +func (*Filter_clauseContext) IsFilter_clauseContext() {} + +func NewFilter_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Filter_clauseContext { + var p = new(Filter_clauseContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_filter_clause + + return p +} + +func (s *Filter_clauseContext) GetParser() antlr.Parser { return s.parser } + +func (s *Filter_clauseContext) FILTER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFILTER_, 0) +} + +func (s *Filter_clauseContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Filter_clauseContext) WHERE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserWHERE_, 0) +} + +func (s *Filter_clauseContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Filter_clauseContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Filter_clauseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Filter_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Filter_clauseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterFilter_clause(s) + } +} + +func (s *Filter_clauseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitFilter_clause(s) + } +} + +func (p *SQLiteParser) Filter_clause() (localctx IFilter_clauseContext) { + this := p + _ = this + + localctx = NewFilter_clauseContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 118, SQLiteParserRULE_filter_clause) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1633) + p.Match(SQLiteParserFILTER_) + } + { + p.SetState(1634) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1635) + p.Match(SQLiteParserWHERE_) + } + { + p.SetState(1636) + p.expr(0) + } + { + p.SetState(1637) + p.Match(SQLiteParserCLOSE_PAR) + } + + return localctx +} + +// IWindow_defnContext is an interface to support dynamic dispatch. +type IWindow_defnContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
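+	// Editorial annotation (added in review; not part of the ANTLR generator
+	// output): per the Window_defn method below, a window definition is
+	//   ( [base_window_name] [PARTITION BY expr [, ...]] ORDER BY ordering_term [, ...] [frame_spec] )
+	// with ORDER BY mandatory in this rule as generated. Illustrative SQL:
+	//   (PARTITION BY dept ORDER BY salary DESC ROWS UNBOUNDED PRECEDING)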
+ GetParser() antlr.Parser + + // Getter signatures + OPEN_PAR() antlr.TerminalNode + CLOSE_PAR() antlr.TerminalNode + ORDER_() antlr.TerminalNode + AllBY_() []antlr.TerminalNode + BY_(i int) antlr.TerminalNode + AllOrdering_term() []IOrdering_termContext + Ordering_term(i int) IOrdering_termContext + Base_window_name() IBase_window_nameContext + PARTITION_() antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + Frame_spec() IFrame_specContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsWindow_defnContext differentiates from other interfaces. + IsWindow_defnContext() +} + +type Window_defnContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyWindow_defnContext() *Window_defnContext { + var p = new(Window_defnContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_window_defn + return p +} + +func (*Window_defnContext) IsWindow_defnContext() {} + +func NewWindow_defnContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Window_defnContext { + var p = new(Window_defnContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_window_defn + + return p +} + +func (s *Window_defnContext) GetParser() antlr.Parser { return s.parser } + +func (s *Window_defnContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Window_defnContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Window_defnContext) ORDER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserORDER_, 0) +} + +func (s *Window_defnContext) AllBY_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserBY_) +} + +func (s *Window_defnContext) BY_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserBY_, i) +} + +func (s *Window_defnContext) AllOrdering_term() []IOrdering_termContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IOrdering_termContext); ok { + len++ + } + } + + tst := make([]IOrdering_termContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IOrdering_termContext); ok { + tst[i] = t.(IOrdering_termContext) + i++ + } + } + + return tst +} + +func (s *Window_defnContext) Ordering_term(i int) IOrdering_termContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOrdering_termContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IOrdering_termContext) +} + +func (s *Window_defnContext) Base_window_name() IBase_window_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IBase_window_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IBase_window_nameContext) +} + +func (s *Window_defnContext) PARTITION_() antlr.TerminalNode { + return s.GetToken(SQLiteParserPARTITION_, 0) +} + +func (s *Window_defnContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Window_defnContext) Expr(i 
int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Window_defnContext) Frame_spec() IFrame_specContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFrame_specContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFrame_specContext) +} + +func (s *Window_defnContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Window_defnContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Window_defnContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Window_defnContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Window_defnContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterWindow_defn(s) + } +} + +func (s *Window_defnContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitWindow_defn(s) + } +} + +func (p *SQLiteParser) Window_defn() (localctx IWindow_defnContext) { + this := p + _ = this + + localctx = NewWindow_defnContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 120, SQLiteParserRULE_window_defn) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1639) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1641) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 242, p.GetParserRuleContext()) == 1 { + { + p.SetState(1640) + p.Base_window_name() + } + + } + p.SetState(1653) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserPARTITION_ { + { + p.SetState(1643) + p.Match(SQLiteParserPARTITION_) + } + { + p.SetState(1644) + p.Match(SQLiteParserBY_) + } + { + p.SetState(1645) + p.expr(0) + } + p.SetState(1650) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1646) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1647) + p.expr(0) + } + + p.SetState(1652) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + } + + { + p.SetState(1655) + p.Match(SQLiteParserORDER_) + } + { + p.SetState(1656) + p.Match(SQLiteParserBY_) + } + { + p.SetState(1657) + p.Ordering_term() + } + p.SetState(1662) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1658) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1659) + p.Ordering_term() + } + + p.SetState(1664) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + p.SetState(1666) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if (int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&2251799880794113) != 0 { + { + p.SetState(1665) + p.Frame_spec() + } + + } + { + p.SetState(1668) + p.Match(SQLiteParserCLOSE_PAR) + } + + return 
localctx +} + +// IOver_clauseContext is an interface to support dynamic dispatch. +type IOver_clauseContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + OVER_() antlr.TerminalNode + Window_name() IWindow_nameContext + OPEN_PAR() antlr.TerminalNode + CLOSE_PAR() antlr.TerminalNode + Base_window_name() IBase_window_nameContext + PARTITION_() antlr.TerminalNode + AllBY_() []antlr.TerminalNode + BY_(i int) antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + ORDER_() antlr.TerminalNode + AllOrdering_term() []IOrdering_termContext + Ordering_term(i int) IOrdering_termContext + Frame_spec() IFrame_specContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsOver_clauseContext differentiates from other interfaces. + IsOver_clauseContext() +} + +type Over_clauseContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyOver_clauseContext() *Over_clauseContext { + var p = new(Over_clauseContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_over_clause + return p +} + +func (*Over_clauseContext) IsOver_clauseContext() {} + +func NewOver_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Over_clauseContext { + var p = new(Over_clauseContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_over_clause + + return p +} + +func (s *Over_clauseContext) GetParser() antlr.Parser { return s.parser } + +func (s *Over_clauseContext) OVER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserOVER_, 0) +} + +func (s *Over_clauseContext) Window_name() IWindow_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IWindow_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IWindow_nameContext) +} + +func (s *Over_clauseContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Over_clauseContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Over_clauseContext) Base_window_name() IBase_window_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IBase_window_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IBase_window_nameContext) +} + +func (s *Over_clauseContext) PARTITION_() antlr.TerminalNode { + return s.GetToken(SQLiteParserPARTITION_, 0) +} + +func (s *Over_clauseContext) AllBY_() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserBY_) +} + +func (s *Over_clauseContext) BY_(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserBY_, i) +} + +func (s *Over_clauseContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Over_clauseContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { 
+ return nil + } + + return t.(IExprContext) +} + +func (s *Over_clauseContext) ORDER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserORDER_, 0) +} + +func (s *Over_clauseContext) AllOrdering_term() []IOrdering_termContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IOrdering_termContext); ok { + len++ + } + } + + tst := make([]IOrdering_termContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IOrdering_termContext); ok { + tst[i] = t.(IOrdering_termContext) + i++ + } + } + + return tst +} + +func (s *Over_clauseContext) Ordering_term(i int) IOrdering_termContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOrdering_termContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IOrdering_termContext) +} + +func (s *Over_clauseContext) Frame_spec() IFrame_specContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFrame_specContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFrame_specContext) +} + +func (s *Over_clauseContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Over_clauseContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Over_clauseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Over_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Over_clauseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterOver_clause(s) + } +} + +func (s *Over_clauseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitOver_clause(s) + } +} + +func (p *SQLiteParser) Over_clause() (localctx IOver_clauseContext) { + this := p + _ = this + + localctx = NewOver_clauseContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 122, SQLiteParserRULE_over_clause) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1670) + p.Match(SQLiteParserOVER_) + } + p.SetState(1704) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 253, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1671) + p.Window_name() + } + + case 2: + { + p.SetState(1672) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1674) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 247, p.GetParserRuleContext()) == 1 { + { + p.SetState(1673) + p.Base_window_name() + } + + } + p.SetState(1686) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserPARTITION_ { + { + p.SetState(1676) + p.Match(SQLiteParserPARTITION_) + } + { + p.SetState(1677) + p.Match(SQLiteParserBY_) + } + { + p.SetState(1678) + p.expr(0) + } + p.SetState(1683) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1679) + 
p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1680) + p.expr(0) + } + + p.SetState(1685) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + } + p.SetState(1698) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserORDER_ { + { + p.SetState(1688) + p.Match(SQLiteParserORDER_) + } + { + p.SetState(1689) + p.Match(SQLiteParserBY_) + } + { + p.SetState(1690) + p.Ordering_term() + } + p.SetState(1695) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1691) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1692) + p.Ordering_term() + } + + p.SetState(1697) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + } + p.SetState(1701) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if (int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&2251799880794113) != 0 { + { + p.SetState(1700) + p.Frame_spec() + } + + } + { + p.SetState(1703) + p.Match(SQLiteParserCLOSE_PAR) + } + + } + + return localctx +} + +// IFrame_specContext is an interface to support dynamic dispatch. +type IFrame_specContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Frame_clause() IFrame_clauseContext + EXCLUDE_() antlr.TerminalNode + NO_() antlr.TerminalNode + OTHERS_() antlr.TerminalNode + CURRENT_() antlr.TerminalNode + ROW_() antlr.TerminalNode + GROUP_() antlr.TerminalNode + TIES_() antlr.TerminalNode + + // IsFrame_specContext differentiates from other interfaces. + IsFrame_specContext() +} + +type Frame_specContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyFrame_specContext() *Frame_specContext { + var p = new(Frame_specContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_frame_spec + return p +} + +func (*Frame_specContext) IsFrame_specContext() {} + +func NewFrame_specContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Frame_specContext { + var p = new(Frame_specContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_frame_spec + + return p +} + +func (s *Frame_specContext) GetParser() antlr.Parser { return s.parser } + +func (s *Frame_specContext) Frame_clause() IFrame_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFrame_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFrame_clauseContext) +} + +func (s *Frame_specContext) EXCLUDE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserEXCLUDE_, 0) +} + +func (s *Frame_specContext) NO_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNO_, 0) +} + +func (s *Frame_specContext) OTHERS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserOTHERS_, 0) +} + +func (s *Frame_specContext) CURRENT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCURRENT_, 0) +} + +func (s *Frame_specContext) ROW_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROW_, 0) +} + +func (s *Frame_specContext) GROUP_() antlr.TerminalNode { + return s.GetToken(SQLiteParserGROUP_, 0) +} + +func (s *Frame_specContext) TIES_() antlr.TerminalNode { + return s.GetToken(SQLiteParserTIES_, 0) +} + +func (s *Frame_specContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Frame_specContext) 
ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Frame_specContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterFrame_spec(s) + } +} + +func (s *Frame_specContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitFrame_spec(s) + } +} + +func (p *SQLiteParser) Frame_spec() (localctx IFrame_specContext) { + this := p + _ = this + + localctx = NewFrame_specContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 124, SQLiteParserRULE_frame_spec) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1706) + p.Frame_clause() + } + p.SetState(1716) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserEXCLUDE_ { + { + p.SetState(1707) + p.Match(SQLiteParserEXCLUDE_) + } + p.SetState(1714) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserNO_: + { + p.SetState(1708) + p.Match(SQLiteParserNO_) + } + { + p.SetState(1709) + p.Match(SQLiteParserOTHERS_) + } + + case SQLiteParserCURRENT_: + { + p.SetState(1710) + p.Match(SQLiteParserCURRENT_) + } + { + p.SetState(1711) + p.Match(SQLiteParserROW_) + } + + case SQLiteParserGROUP_: + { + p.SetState(1712) + p.Match(SQLiteParserGROUP_) + } + + case SQLiteParserTIES_: + { + p.SetState(1713) + p.Match(SQLiteParserTIES_) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + } + + return localctx +} + +// IFrame_clauseContext is an interface to support dynamic dispatch. +type IFrame_clauseContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + RANGE_() antlr.TerminalNode + ROWS_() antlr.TerminalNode + GROUPS_() antlr.TerminalNode + Frame_single() IFrame_singleContext + BETWEEN_() antlr.TerminalNode + Frame_left() IFrame_leftContext + AND_() antlr.TerminalNode + Frame_right() IFrame_rightContext + + // IsFrame_clauseContext differentiates from other interfaces. 
+ IsFrame_clauseContext() +} + +type Frame_clauseContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyFrame_clauseContext() *Frame_clauseContext { + var p = new(Frame_clauseContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_frame_clause + return p +} + +func (*Frame_clauseContext) IsFrame_clauseContext() {} + +func NewFrame_clauseContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Frame_clauseContext { + var p = new(Frame_clauseContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_frame_clause + + return p +} + +func (s *Frame_clauseContext) GetParser() antlr.Parser { return s.parser } + +func (s *Frame_clauseContext) RANGE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserRANGE_, 0) +} + +func (s *Frame_clauseContext) ROWS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROWS_, 0) +} + +func (s *Frame_clauseContext) GROUPS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserGROUPS_, 0) +} + +func (s *Frame_clauseContext) Frame_single() IFrame_singleContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFrame_singleContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFrame_singleContext) +} + +func (s *Frame_clauseContext) BETWEEN_() antlr.TerminalNode { + return s.GetToken(SQLiteParserBETWEEN_, 0) +} + +func (s *Frame_clauseContext) Frame_left() IFrame_leftContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFrame_leftContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFrame_leftContext) +} + +func (s *Frame_clauseContext) AND_() antlr.TerminalNode { + return s.GetToken(SQLiteParserAND_, 0) +} + +func (s *Frame_clauseContext) Frame_right() IFrame_rightContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFrame_rightContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFrame_rightContext) +} + +func (s *Frame_clauseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Frame_clauseContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Frame_clauseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterFrame_clause(s) + } +} + +func (s *Frame_clauseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitFrame_clause(s) + } +} + +func (p *SQLiteParser) Frame_clause() (localctx IFrame_clauseContext) { + this := p + _ = this + + localctx = NewFrame_clauseContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 126, SQLiteParserRULE_frame_clause) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1718) + _la = p.GetTokenStream().LA(1) + + if !((int64((_la-128)) & ^0x3f) == 0 && 
((int64(1)<<(_la-128))&2251799880794113) != 0) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + p.SetState(1725) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 256, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1719) + p.Frame_single() + } + + case 2: + { + p.SetState(1720) + p.Match(SQLiteParserBETWEEN_) + } + { + p.SetState(1721) + p.Frame_left() + } + { + p.SetState(1722) + p.Match(SQLiteParserAND_) + } + { + p.SetState(1723) + p.Frame_right() + } + + } + + return localctx +} + +// ISimple_function_invocationContext is an interface to support dynamic dispatch. +type ISimple_function_invocationContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Simple_func() ISimple_funcContext + OPEN_PAR() antlr.TerminalNode + CLOSE_PAR() antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + STAR() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsSimple_function_invocationContext differentiates from other interfaces. + IsSimple_function_invocationContext() +} + +type Simple_function_invocationContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptySimple_function_invocationContext() *Simple_function_invocationContext { + var p = new(Simple_function_invocationContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_simple_function_invocation + return p +} + +func (*Simple_function_invocationContext) IsSimple_function_invocationContext() {} + +func NewSimple_function_invocationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Simple_function_invocationContext { + var p = new(Simple_function_invocationContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_simple_function_invocation + + return p +} + +func (s *Simple_function_invocationContext) GetParser() antlr.Parser { return s.parser } + +func (s *Simple_function_invocationContext) Simple_func() ISimple_funcContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISimple_funcContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISimple_funcContext) +} + +func (s *Simple_function_invocationContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Simple_function_invocationContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Simple_function_invocationContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Simple_function_invocationContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Simple_function_invocationContext) STAR() antlr.TerminalNode { + return 
s.GetToken(SQLiteParserSTAR, 0) +} + +func (s *Simple_function_invocationContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Simple_function_invocationContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Simple_function_invocationContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Simple_function_invocationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Simple_function_invocationContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterSimple_function_invocation(s) + } +} + +func (s *Simple_function_invocationContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitSimple_function_invocation(s) + } +} + +func (p *SQLiteParser) Simple_function_invocation() (localctx ISimple_function_invocationContext) { + this := p + _ = this + + localctx = NewSimple_function_invocationContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 128, SQLiteParserRULE_simple_function_invocation) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1727) + p.Simple_func() + } + { + p.SetState(1728) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1738) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserOPEN_PAR, SQLiteParserPLUS, SQLiteParserMINUS, SQLiteParserTILDE, SQLiteParserABORT_, SQLiteParserACTION_, SQLiteParserADD_, SQLiteParserAFTER_, SQLiteParserALL_, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserAND_, SQLiteParserAS_, SQLiteParserASC_, SQLiteParserATTACH_, SQLiteParserAUTOINCREMENT_, SQLiteParserBEFORE_, SQLiteParserBEGIN_, SQLiteParserBETWEEN_, SQLiteParserBY_, SQLiteParserCASCADE_, SQLiteParserCASE_, SQLiteParserCAST_, SQLiteParserCHECK_, SQLiteParserCOLLATE_, SQLiteParserCOLUMN_, SQLiteParserCOMMIT_, SQLiteParserCONFLICT_, SQLiteParserCONSTRAINT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserCURRENT_DATE_, SQLiteParserCURRENT_TIME_, SQLiteParserCURRENT_TIMESTAMP_, SQLiteParserDATABASE_, SQLiteParserDEFAULT_, SQLiteParserDEFERRABLE_, SQLiteParserDEFERRED_, SQLiteParserDELETE_, SQLiteParserDESC_, SQLiteParserDETACH_, SQLiteParserDISTINCT_, SQLiteParserDROP_, SQLiteParserEACH_, SQLiteParserELSE_, SQLiteParserEND_, SQLiteParserESCAPE_, SQLiteParserEXCEPT_, SQLiteParserEXCLUSIVE_, SQLiteParserEXISTS_, SQLiteParserEXPLAIN_, SQLiteParserFAIL_, SQLiteParserFOR_, SQLiteParserFOREIGN_, SQLiteParserFROM_, SQLiteParserFULL_, SQLiteParserGLOB_, SQLiteParserGROUP_, SQLiteParserHAVING_, SQLiteParserIF_, SQLiteParserIGNORE_, SQLiteParserIMMEDIATE_, SQLiteParserIN_, SQLiteParserINDEX_, SQLiteParserINDEXED_, SQLiteParserINITIALLY_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINSTEAD_, SQLiteParserINTERSECT_, SQLiteParserINTO_, SQLiteParserIS_, SQLiteParserISNULL_, SQLiteParserJOIN_, SQLiteParserKEY_, SQLiteParserLEFT_, SQLiteParserLIKE_, SQLiteParserLIMIT_, SQLiteParserMATCH_, SQLiteParserNATURAL_, SQLiteParserNO_, SQLiteParserNOT_, SQLiteParserNOTNULL_, SQLiteParserNULL_, SQLiteParserOF_, 
SQLiteParserOFFSET_, SQLiteParserON_, SQLiteParserOR_, SQLiteParserORDER_, SQLiteParserOUTER_, SQLiteParserPLAN_, SQLiteParserPRAGMA_, SQLiteParserPRIMARY_, SQLiteParserQUERY_, SQLiteParserRAISE_, SQLiteParserRECURSIVE_, SQLiteParserREFERENCES_, SQLiteParserREGEXP_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserRENAME_, SQLiteParserREPLACE_, SQLiteParserRESTRICT_, SQLiteParserRIGHT_, SQLiteParserROLLBACK_, SQLiteParserROW_, SQLiteParserROWS_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserTABLE_, SQLiteParserTEMP_, SQLiteParserTEMPORARY_, SQLiteParserTHEN_, SQLiteParserTO_, SQLiteParserTRANSACTION_, SQLiteParserTRIGGER_, SQLiteParserUNION_, SQLiteParserUNIQUE_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserVIEW_, SQLiteParserVIRTUAL_, SQLiteParserWHEN_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWITHOUT_, SQLiteParserFIRST_VALUE_, SQLiteParserOVER_, SQLiteParserPARTITION_, SQLiteParserRANGE_, SQLiteParserPRECEDING_, SQLiteParserUNBOUNDED_, SQLiteParserCURRENT_, SQLiteParserFOLLOWING_, SQLiteParserCUME_DIST_, SQLiteParserDENSE_RANK_, SQLiteParserLAG_, SQLiteParserLAST_VALUE_, SQLiteParserLEAD_, SQLiteParserNTH_VALUE_, SQLiteParserNTILE_, SQLiteParserPERCENT_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_, SQLiteParserGENERATED_, SQLiteParserALWAYS_, SQLiteParserSTORED_, SQLiteParserTRUE_, SQLiteParserFALSE_, SQLiteParserWINDOW_, SQLiteParserNULLS_, SQLiteParserFIRST_, SQLiteParserLAST_, SQLiteParserFILTER_, SQLiteParserGROUPS_, SQLiteParserEXCLUDE_, SQLiteParserIDENTIFIER, SQLiteParserNUMERIC_LITERAL, SQLiteParserBIND_PARAMETER, SQLiteParserSTRING_LITERAL, SQLiteParserBLOB_LITERAL: + { + p.SetState(1729) + p.expr(0) + } + p.SetState(1734) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1730) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1731) + p.expr(0) + } + + p.SetState(1736) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + case SQLiteParserSTAR: + { + p.SetState(1737) + p.Match(SQLiteParserSTAR) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + { + p.SetState(1740) + p.Match(SQLiteParserCLOSE_PAR) + } + + return localctx +} + +// IAggregate_function_invocationContext is an interface to support dynamic dispatch. +type IAggregate_function_invocationContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Aggregate_func() IAggregate_funcContext + OPEN_PAR() antlr.TerminalNode + CLOSE_PAR() antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + STAR() antlr.TerminalNode + Filter_clause() IFilter_clauseContext + DISTINCT_() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsAggregate_function_invocationContext differentiates from other interfaces. 
+ IsAggregate_function_invocationContext() +} + +type Aggregate_function_invocationContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyAggregate_function_invocationContext() *Aggregate_function_invocationContext { + var p = new(Aggregate_function_invocationContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_aggregate_function_invocation + return p +} + +func (*Aggregate_function_invocationContext) IsAggregate_function_invocationContext() {} + +func NewAggregate_function_invocationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Aggregate_function_invocationContext { + var p = new(Aggregate_function_invocationContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_aggregate_function_invocation + + return p +} + +func (s *Aggregate_function_invocationContext) GetParser() antlr.Parser { return s.parser } + +func (s *Aggregate_function_invocationContext) Aggregate_func() IAggregate_funcContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAggregate_funcContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAggregate_funcContext) +} + +func (s *Aggregate_function_invocationContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Aggregate_function_invocationContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Aggregate_function_invocationContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Aggregate_function_invocationContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Aggregate_function_invocationContext) STAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserSTAR, 0) +} + +func (s *Aggregate_function_invocationContext) Filter_clause() IFilter_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFilter_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFilter_clauseContext) +} + +func (s *Aggregate_function_invocationContext) DISTINCT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDISTINCT_, 0) +} + +func (s *Aggregate_function_invocationContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Aggregate_function_invocationContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Aggregate_function_invocationContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Aggregate_function_invocationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Aggregate_function_invocationContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, 
ok := listener.(SQLiteParserListener); ok { + listenerT.EnterAggregate_function_invocation(s) + } +} + +func (s *Aggregate_function_invocationContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitAggregate_function_invocation(s) + } +} + +func (p *SQLiteParser) Aggregate_function_invocation() (localctx IAggregate_function_invocationContext) { + this := p + _ = this + + localctx = NewAggregate_function_invocationContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 130, SQLiteParserRULE_aggregate_function_invocation) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1742) + p.Aggregate_func() + } + { + p.SetState(1743) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1756) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserOPEN_PAR, SQLiteParserPLUS, SQLiteParserMINUS, SQLiteParserTILDE, SQLiteParserABORT_, SQLiteParserACTION_, SQLiteParserADD_, SQLiteParserAFTER_, SQLiteParserALL_, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserAND_, SQLiteParserAS_, SQLiteParserASC_, SQLiteParserATTACH_, SQLiteParserAUTOINCREMENT_, SQLiteParserBEFORE_, SQLiteParserBEGIN_, SQLiteParserBETWEEN_, SQLiteParserBY_, SQLiteParserCASCADE_, SQLiteParserCASE_, SQLiteParserCAST_, SQLiteParserCHECK_, SQLiteParserCOLLATE_, SQLiteParserCOLUMN_, SQLiteParserCOMMIT_, SQLiteParserCONFLICT_, SQLiteParserCONSTRAINT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserCURRENT_DATE_, SQLiteParserCURRENT_TIME_, SQLiteParserCURRENT_TIMESTAMP_, SQLiteParserDATABASE_, SQLiteParserDEFAULT_, SQLiteParserDEFERRABLE_, SQLiteParserDEFERRED_, SQLiteParserDELETE_, SQLiteParserDESC_, SQLiteParserDETACH_, SQLiteParserDISTINCT_, SQLiteParserDROP_, SQLiteParserEACH_, SQLiteParserELSE_, SQLiteParserEND_, SQLiteParserESCAPE_, SQLiteParserEXCEPT_, SQLiteParserEXCLUSIVE_, SQLiteParserEXISTS_, SQLiteParserEXPLAIN_, SQLiteParserFAIL_, SQLiteParserFOR_, SQLiteParserFOREIGN_, SQLiteParserFROM_, SQLiteParserFULL_, SQLiteParserGLOB_, SQLiteParserGROUP_, SQLiteParserHAVING_, SQLiteParserIF_, SQLiteParserIGNORE_, SQLiteParserIMMEDIATE_, SQLiteParserIN_, SQLiteParserINDEX_, SQLiteParserINDEXED_, SQLiteParserINITIALLY_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINSTEAD_, SQLiteParserINTERSECT_, SQLiteParserINTO_, SQLiteParserIS_, SQLiteParserISNULL_, SQLiteParserJOIN_, SQLiteParserKEY_, SQLiteParserLEFT_, SQLiteParserLIKE_, SQLiteParserLIMIT_, SQLiteParserMATCH_, SQLiteParserNATURAL_, SQLiteParserNO_, SQLiteParserNOT_, SQLiteParserNOTNULL_, SQLiteParserNULL_, SQLiteParserOF_, SQLiteParserOFFSET_, SQLiteParserON_, SQLiteParserOR_, SQLiteParserORDER_, SQLiteParserOUTER_, SQLiteParserPLAN_, SQLiteParserPRAGMA_, SQLiteParserPRIMARY_, SQLiteParserQUERY_, SQLiteParserRAISE_, SQLiteParserRECURSIVE_, SQLiteParserREFERENCES_, SQLiteParserREGEXP_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserRENAME_, SQLiteParserREPLACE_, SQLiteParserRESTRICT_, SQLiteParserRIGHT_, SQLiteParserROLLBACK_, SQLiteParserROW_, SQLiteParserROWS_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserTABLE_, SQLiteParserTEMP_, SQLiteParserTEMPORARY_, SQLiteParserTHEN_, SQLiteParserTO_, 
SQLiteParserTRANSACTION_, SQLiteParserTRIGGER_, SQLiteParserUNION_, SQLiteParserUNIQUE_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserVIEW_, SQLiteParserVIRTUAL_, SQLiteParserWHEN_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWITHOUT_, SQLiteParserFIRST_VALUE_, SQLiteParserOVER_, SQLiteParserPARTITION_, SQLiteParserRANGE_, SQLiteParserPRECEDING_, SQLiteParserUNBOUNDED_, SQLiteParserCURRENT_, SQLiteParserFOLLOWING_, SQLiteParserCUME_DIST_, SQLiteParserDENSE_RANK_, SQLiteParserLAG_, SQLiteParserLAST_VALUE_, SQLiteParserLEAD_, SQLiteParserNTH_VALUE_, SQLiteParserNTILE_, SQLiteParserPERCENT_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_, SQLiteParserGENERATED_, SQLiteParserALWAYS_, SQLiteParserSTORED_, SQLiteParserTRUE_, SQLiteParserFALSE_, SQLiteParserWINDOW_, SQLiteParserNULLS_, SQLiteParserFIRST_, SQLiteParserLAST_, SQLiteParserFILTER_, SQLiteParserGROUPS_, SQLiteParserEXCLUDE_, SQLiteParserIDENTIFIER, SQLiteParserNUMERIC_LITERAL, SQLiteParserBIND_PARAMETER, SQLiteParserSTRING_LITERAL, SQLiteParserBLOB_LITERAL: + p.SetState(1745) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 259, p.GetParserRuleContext()) == 1 { + { + p.SetState(1744) + p.Match(SQLiteParserDISTINCT_) + } + + } + { + p.SetState(1747) + p.expr(0) + } + p.SetState(1752) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1748) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1749) + p.expr(0) + } + + p.SetState(1754) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + case SQLiteParserSTAR: + { + p.SetState(1755) + p.Match(SQLiteParserSTAR) + } + + case SQLiteParserCLOSE_PAR: + + default: + } + { + p.SetState(1758) + p.Match(SQLiteParserCLOSE_PAR) + } + p.SetState(1760) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserFILTER_ { + { + p.SetState(1759) + p.Filter_clause() + } + + } + + return localctx +} + +// IWindow_function_invocationContext is an interface to support dynamic dispatch. +type IWindow_function_invocationContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Window_function() IWindow_functionContext + OPEN_PAR() antlr.TerminalNode + CLOSE_PAR() antlr.TerminalNode + OVER_() antlr.TerminalNode + Window_defn() IWindow_defnContext + Window_name() IWindow_nameContext + AllExpr() []IExprContext + Expr(i int) IExprContext + STAR() antlr.TerminalNode + Filter_clause() IFilter_clauseContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsWindow_function_invocationContext differentiates from other interfaces. 
+ IsWindow_function_invocationContext() +} + +type Window_function_invocationContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyWindow_function_invocationContext() *Window_function_invocationContext { + var p = new(Window_function_invocationContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_window_function_invocation + return p +} + +func (*Window_function_invocationContext) IsWindow_function_invocationContext() {} + +func NewWindow_function_invocationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Window_function_invocationContext { + var p = new(Window_function_invocationContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_window_function_invocation + + return p +} + +func (s *Window_function_invocationContext) GetParser() antlr.Parser { return s.parser } + +func (s *Window_function_invocationContext) Window_function() IWindow_functionContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IWindow_functionContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IWindow_functionContext) +} + +func (s *Window_function_invocationContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Window_function_invocationContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Window_function_invocationContext) OVER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserOVER_, 0) +} + +func (s *Window_function_invocationContext) Window_defn() IWindow_defnContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IWindow_defnContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IWindow_defnContext) +} + +func (s *Window_function_invocationContext) Window_name() IWindow_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IWindow_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IWindow_nameContext) +} + +func (s *Window_function_invocationContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Window_function_invocationContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Window_function_invocationContext) STAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserSTAR, 0) +} + +func (s *Window_function_invocationContext) Filter_clause() IFilter_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFilter_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFilter_clauseContext) +} + +func (s *Window_function_invocationContext) AllCOMMA() []antlr.TerminalNode { 
+ return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Window_function_invocationContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Window_function_invocationContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Window_function_invocationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Window_function_invocationContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterWindow_function_invocation(s) + } +} + +func (s *Window_function_invocationContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitWindow_function_invocation(s) + } +} + +func (p *SQLiteParser) Window_function_invocation() (localctx IWindow_function_invocationContext) { + this := p + _ = this + + localctx = NewWindow_function_invocationContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 132, SQLiteParserRULE_window_function_invocation) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1762) + p.Window_function() + } + { + p.SetState(1763) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1773) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserOPEN_PAR, SQLiteParserPLUS, SQLiteParserMINUS, SQLiteParserTILDE, SQLiteParserABORT_, SQLiteParserACTION_, SQLiteParserADD_, SQLiteParserAFTER_, SQLiteParserALL_, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserAND_, SQLiteParserAS_, SQLiteParserASC_, SQLiteParserATTACH_, SQLiteParserAUTOINCREMENT_, SQLiteParserBEFORE_, SQLiteParserBEGIN_, SQLiteParserBETWEEN_, SQLiteParserBY_, SQLiteParserCASCADE_, SQLiteParserCASE_, SQLiteParserCAST_, SQLiteParserCHECK_, SQLiteParserCOLLATE_, SQLiteParserCOLUMN_, SQLiteParserCOMMIT_, SQLiteParserCONFLICT_, SQLiteParserCONSTRAINT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserCURRENT_DATE_, SQLiteParserCURRENT_TIME_, SQLiteParserCURRENT_TIMESTAMP_, SQLiteParserDATABASE_, SQLiteParserDEFAULT_, SQLiteParserDEFERRABLE_, SQLiteParserDEFERRED_, SQLiteParserDELETE_, SQLiteParserDESC_, SQLiteParserDETACH_, SQLiteParserDISTINCT_, SQLiteParserDROP_, SQLiteParserEACH_, SQLiteParserELSE_, SQLiteParserEND_, SQLiteParserESCAPE_, SQLiteParserEXCEPT_, SQLiteParserEXCLUSIVE_, SQLiteParserEXISTS_, SQLiteParserEXPLAIN_, SQLiteParserFAIL_, SQLiteParserFOR_, SQLiteParserFOREIGN_, SQLiteParserFROM_, SQLiteParserFULL_, SQLiteParserGLOB_, SQLiteParserGROUP_, SQLiteParserHAVING_, SQLiteParserIF_, SQLiteParserIGNORE_, SQLiteParserIMMEDIATE_, SQLiteParserIN_, SQLiteParserINDEX_, SQLiteParserINDEXED_, SQLiteParserINITIALLY_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINSTEAD_, SQLiteParserINTERSECT_, SQLiteParserINTO_, SQLiteParserIS_, SQLiteParserISNULL_, SQLiteParserJOIN_, SQLiteParserKEY_, SQLiteParserLEFT_, SQLiteParserLIKE_, SQLiteParserLIMIT_, SQLiteParserMATCH_, SQLiteParserNATURAL_, SQLiteParserNO_, SQLiteParserNOT_, SQLiteParserNOTNULL_, SQLiteParserNULL_, SQLiteParserOF_, SQLiteParserOFFSET_, SQLiteParserON_, SQLiteParserOR_, SQLiteParserORDER_, SQLiteParserOUTER_, SQLiteParserPLAN_, 
SQLiteParserPRAGMA_, SQLiteParserPRIMARY_, SQLiteParserQUERY_, SQLiteParserRAISE_, SQLiteParserRECURSIVE_, SQLiteParserREFERENCES_, SQLiteParserREGEXP_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserRENAME_, SQLiteParserREPLACE_, SQLiteParserRESTRICT_, SQLiteParserRIGHT_, SQLiteParserROLLBACK_, SQLiteParserROW_, SQLiteParserROWS_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserTABLE_, SQLiteParserTEMP_, SQLiteParserTEMPORARY_, SQLiteParserTHEN_, SQLiteParserTO_, SQLiteParserTRANSACTION_, SQLiteParserTRIGGER_, SQLiteParserUNION_, SQLiteParserUNIQUE_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserVIEW_, SQLiteParserVIRTUAL_, SQLiteParserWHEN_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWITHOUT_, SQLiteParserFIRST_VALUE_, SQLiteParserOVER_, SQLiteParserPARTITION_, SQLiteParserRANGE_, SQLiteParserPRECEDING_, SQLiteParserUNBOUNDED_, SQLiteParserCURRENT_, SQLiteParserFOLLOWING_, SQLiteParserCUME_DIST_, SQLiteParserDENSE_RANK_, SQLiteParserLAG_, SQLiteParserLAST_VALUE_, SQLiteParserLEAD_, SQLiteParserNTH_VALUE_, SQLiteParserNTILE_, SQLiteParserPERCENT_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_, SQLiteParserGENERATED_, SQLiteParserALWAYS_, SQLiteParserSTORED_, SQLiteParserTRUE_, SQLiteParserFALSE_, SQLiteParserWINDOW_, SQLiteParserNULLS_, SQLiteParserFIRST_, SQLiteParserLAST_, SQLiteParserFILTER_, SQLiteParserGROUPS_, SQLiteParserEXCLUDE_, SQLiteParserIDENTIFIER, SQLiteParserNUMERIC_LITERAL, SQLiteParserBIND_PARAMETER, SQLiteParserSTRING_LITERAL, SQLiteParserBLOB_LITERAL: + { + p.SetState(1764) + p.expr(0) + } + p.SetState(1769) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1765) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1766) + p.expr(0) + } + + p.SetState(1771) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + case SQLiteParserSTAR: + { + p.SetState(1772) + p.Match(SQLiteParserSTAR) + } + + case SQLiteParserCLOSE_PAR: + + default: + } + { + p.SetState(1775) + p.Match(SQLiteParserCLOSE_PAR) + } + p.SetState(1777) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserFILTER_ { + { + p.SetState(1776) + p.Filter_clause() + } + + } + { + p.SetState(1779) + p.Match(SQLiteParserOVER_) + } + p.SetState(1782) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 266, p.GetParserRuleContext()) { + case 1: + { + p.SetState(1780) + p.Window_defn() + } + + case 2: + { + p.SetState(1781) + p.Window_name() + } + + } + + return localctx +} + +// ICommon_table_stmtContext is an interface to support dynamic dispatch. +type ICommon_table_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + WITH_() antlr.TerminalNode + AllCommon_table_expression() []ICommon_table_expressionContext + Common_table_expression(i int) ICommon_table_expressionContext + RECURSIVE_() antlr.TerminalNode + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsCommon_table_stmtContext differentiates from other interfaces. 
+ IsCommon_table_stmtContext() +} + +type Common_table_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyCommon_table_stmtContext() *Common_table_stmtContext { + var p = new(Common_table_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_common_table_stmt + return p +} + +func (*Common_table_stmtContext) IsCommon_table_stmtContext() {} + +func NewCommon_table_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Common_table_stmtContext { + var p = new(Common_table_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_common_table_stmt + + return p +} + +func (s *Common_table_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Common_table_stmtContext) WITH_() antlr.TerminalNode { + return s.GetToken(SQLiteParserWITH_, 0) +} + +func (s *Common_table_stmtContext) AllCommon_table_expression() []ICommon_table_expressionContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ICommon_table_expressionContext); ok { + len++ + } + } + + tst := make([]ICommon_table_expressionContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ICommon_table_expressionContext); ok { + tst[i] = t.(ICommon_table_expressionContext) + i++ + } + } + + return tst +} + +func (s *Common_table_stmtContext) Common_table_expression(i int) ICommon_table_expressionContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICommon_table_expressionContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ICommon_table_expressionContext) +} + +func (s *Common_table_stmtContext) RECURSIVE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserRECURSIVE_, 0) +} + +func (s *Common_table_stmtContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Common_table_stmtContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Common_table_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Common_table_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Common_table_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterCommon_table_stmt(s) + } +} + +func (s *Common_table_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitCommon_table_stmt(s) + } +} + +func (p *SQLiteParser) Common_table_stmt() (localctx ICommon_table_stmtContext) { + this := p + _ = this + + localctx = NewCommon_table_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 134, SQLiteParserRULE_common_table_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1784) + p.Match(SQLiteParserWITH_) + } + p.SetState(1786) + p.GetErrorHandler().Sync(p) + + if 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 267, p.GetParserRuleContext()) == 1 { + { + p.SetState(1785) + p.Match(SQLiteParserRECURSIVE_) + } + + } + { + p.SetState(1788) + p.Common_table_expression() + } + p.SetState(1793) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1789) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1790) + p.Common_table_expression() + } + + p.SetState(1795) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + return localctx +} + +// IOrder_by_stmtContext is an interface to support dynamic dispatch. +type IOrder_by_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + ORDER_() antlr.TerminalNode + BY_() antlr.TerminalNode + AllOrdering_term() []IOrdering_termContext + Ordering_term(i int) IOrdering_termContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsOrder_by_stmtContext differentiates from other interfaces. + IsOrder_by_stmtContext() +} + +type Order_by_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyOrder_by_stmtContext() *Order_by_stmtContext { + var p = new(Order_by_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_order_by_stmt + return p +} + +func (*Order_by_stmtContext) IsOrder_by_stmtContext() {} + +func NewOrder_by_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Order_by_stmtContext { + var p = new(Order_by_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_order_by_stmt + + return p +} + +func (s *Order_by_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Order_by_stmtContext) ORDER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserORDER_, 0) +} + +func (s *Order_by_stmtContext) BY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserBY_, 0) +} + +func (s *Order_by_stmtContext) AllOrdering_term() []IOrdering_termContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IOrdering_termContext); ok { + len++ + } + } + + tst := make([]IOrdering_termContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IOrdering_termContext); ok { + tst[i] = t.(IOrdering_termContext) + i++ + } + } + + return tst +} + +func (s *Order_by_stmtContext) Ordering_term(i int) IOrdering_termContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOrdering_termContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IOrdering_termContext) +} + +func (s *Order_by_stmtContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Order_by_stmtContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Order_by_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Order_by_stmtContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Order_by_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterOrder_by_stmt(s) + } +} + +func (s *Order_by_stmtContext) 
ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitOrder_by_stmt(s) + } +} + +func (p *SQLiteParser) Order_by_stmt() (localctx IOrder_by_stmtContext) { + this := p + _ = this + + localctx = NewOrder_by_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 136, SQLiteParserRULE_order_by_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1796) + p.Match(SQLiteParserORDER_) + } + { + p.SetState(1797) + p.Match(SQLiteParserBY_) + } + { + p.SetState(1798) + p.Ordering_term() + } + p.SetState(1803) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1799) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1800) + p.Ordering_term() + } + + p.SetState(1805) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + return localctx +} + +// ILimit_stmtContext is an interface to support dynamic dispatch. +type ILimit_stmtContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + LIMIT_() antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + OFFSET_() antlr.TerminalNode + COMMA() antlr.TerminalNode + + // IsLimit_stmtContext differentiates from other interfaces. + IsLimit_stmtContext() +} + +type Limit_stmtContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyLimit_stmtContext() *Limit_stmtContext { + var p = new(Limit_stmtContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_limit_stmt + return p +} + +func (*Limit_stmtContext) IsLimit_stmtContext() {} + +func NewLimit_stmtContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Limit_stmtContext { + var p = new(Limit_stmtContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_limit_stmt + + return p +} + +func (s *Limit_stmtContext) GetParser() antlr.Parser { return s.parser } + +func (s *Limit_stmtContext) LIMIT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserLIMIT_, 0) +} + +func (s *Limit_stmtContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Limit_stmtContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Limit_stmtContext) OFFSET_() antlr.TerminalNode { + return s.GetToken(SQLiteParserOFFSET_, 0) +} + +func (s *Limit_stmtContext) COMMA() antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, 0) +} + +func (s *Limit_stmtContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Limit_stmtContext) 
ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Limit_stmtContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterLimit_stmt(s) + } +} + +func (s *Limit_stmtContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitLimit_stmt(s) + } +} + +func (p *SQLiteParser) Limit_stmt() (localctx ILimit_stmtContext) { + this := p + _ = this + + localctx = NewLimit_stmtContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 138, SQLiteParserRULE_limit_stmt) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1806) + p.Match(SQLiteParserLIMIT_) + } + { + p.SetState(1807) + p.expr(0) + } + p.SetState(1810) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserCOMMA || _la == SQLiteParserOFFSET_ { + { + p.SetState(1808) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserCOMMA || _la == SQLiteParserOFFSET_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(1809) + p.expr(0) + } + + } + + return localctx +} + +// IOrdering_termContext is an interface to support dynamic dispatch. +type IOrdering_termContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Expr() IExprContext + COLLATE_() antlr.TerminalNode + Collation_name() ICollation_nameContext + Asc_desc() IAsc_descContext + NULLS_() antlr.TerminalNode + FIRST_() antlr.TerminalNode + LAST_() antlr.TerminalNode + + // IsOrdering_termContext differentiates from other interfaces. 
+ IsOrdering_termContext() +} + +type Ordering_termContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyOrdering_termContext() *Ordering_termContext { + var p = new(Ordering_termContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_ordering_term + return p +} + +func (*Ordering_termContext) IsOrdering_termContext() {} + +func NewOrdering_termContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Ordering_termContext { + var p = new(Ordering_termContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_ordering_term + + return p +} + +func (s *Ordering_termContext) GetParser() antlr.Parser { return s.parser } + +func (s *Ordering_termContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Ordering_termContext) COLLATE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCOLLATE_, 0) +} + +func (s *Ordering_termContext) Collation_name() ICollation_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICollation_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ICollation_nameContext) +} + +func (s *Ordering_termContext) Asc_desc() IAsc_descContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAsc_descContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAsc_descContext) +} + +func (s *Ordering_termContext) NULLS_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNULLS_, 0) +} + +func (s *Ordering_termContext) FIRST_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFIRST_, 0) +} + +func (s *Ordering_termContext) LAST_() antlr.TerminalNode { + return s.GetToken(SQLiteParserLAST_, 0) +} + +func (s *Ordering_termContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Ordering_termContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Ordering_termContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterOrdering_term(s) + } +} + +func (s *Ordering_termContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitOrdering_term(s) + } +} + +func (p *SQLiteParser) Ordering_term() (localctx IOrdering_termContext) { + this := p + _ = this + + localctx = NewOrdering_termContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 140, SQLiteParserRULE_ordering_term) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1812) + p.expr(0) + } + p.SetState(1815) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserCOLLATE_ { + { + p.SetState(1813) + p.Match(SQLiteParserCOLLATE_) + } + { + 
p.SetState(1814) + p.Collation_name() + } + + } + p.SetState(1818) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserASC_ || _la == SQLiteParserDESC_ { + { + p.SetState(1817) + p.Asc_desc() + } + + } + p.SetState(1822) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserNULLS_ { + { + p.SetState(1820) + p.Match(SQLiteParserNULLS_) + } + { + p.SetState(1821) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserFIRST_ || _la == SQLiteParserLAST_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + } + + return localctx +} + +// IAsc_descContext is an interface to support dynamic dispatch. +type IAsc_descContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + ASC_() antlr.TerminalNode + DESC_() antlr.TerminalNode + + // IsAsc_descContext differentiates from other interfaces. + IsAsc_descContext() +} + +type Asc_descContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyAsc_descContext() *Asc_descContext { + var p = new(Asc_descContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_asc_desc + return p +} + +func (*Asc_descContext) IsAsc_descContext() {} + +func NewAsc_descContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Asc_descContext { + var p = new(Asc_descContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_asc_desc + + return p +} + +func (s *Asc_descContext) GetParser() antlr.Parser { return s.parser } + +func (s *Asc_descContext) ASC_() antlr.TerminalNode { + return s.GetToken(SQLiteParserASC_, 0) +} + +func (s *Asc_descContext) DESC_() antlr.TerminalNode { + return s.GetToken(SQLiteParserDESC_, 0) +} + +func (s *Asc_descContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Asc_descContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Asc_descContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterAsc_desc(s) + } +} + +func (s *Asc_descContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitAsc_desc(s) + } +} + +func (p *SQLiteParser) Asc_desc() (localctx IAsc_descContext) { + this := p + _ = this + + localctx = NewAsc_descContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 142, SQLiteParserRULE_asc_desc) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1824) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserASC_ || _la == SQLiteParserDESC_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + return localctx +} + +// IFrame_leftContext is an interface to support dynamic dispatch. +type IFrame_leftContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
+ GetParser() antlr.Parser + + // Getter signatures + Expr() IExprContext + PRECEDING_() antlr.TerminalNode + FOLLOWING_() antlr.TerminalNode + CURRENT_() antlr.TerminalNode + ROW_() antlr.TerminalNode + UNBOUNDED_() antlr.TerminalNode + + // IsFrame_leftContext differentiates from other interfaces. + IsFrame_leftContext() +} + +type Frame_leftContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyFrame_leftContext() *Frame_leftContext { + var p = new(Frame_leftContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_frame_left + return p +} + +func (*Frame_leftContext) IsFrame_leftContext() {} + +func NewFrame_leftContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Frame_leftContext { + var p = new(Frame_leftContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_frame_left + + return p +} + +func (s *Frame_leftContext) GetParser() antlr.Parser { return s.parser } + +func (s *Frame_leftContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Frame_leftContext) PRECEDING_() antlr.TerminalNode { + return s.GetToken(SQLiteParserPRECEDING_, 0) +} + +func (s *Frame_leftContext) FOLLOWING_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFOLLOWING_, 0) +} + +func (s *Frame_leftContext) CURRENT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCURRENT_, 0) +} + +func (s *Frame_leftContext) ROW_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROW_, 0) +} + +func (s *Frame_leftContext) UNBOUNDED_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUNBOUNDED_, 0) +} + +func (s *Frame_leftContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Frame_leftContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Frame_leftContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterFrame_left(s) + } +} + +func (s *Frame_leftContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitFrame_left(s) + } +} + +func (p *SQLiteParser) Frame_left() (localctx IFrame_leftContext) { + this := p + _ = this + + localctx = NewFrame_leftContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 144, SQLiteParserRULE_frame_left) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1836) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 274, p.GetParserRuleContext()) { + case 1: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1826) + p.expr(0) + } + { + p.SetState(1827) + p.Match(SQLiteParserPRECEDING_) + } + + case 2: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(1829) + p.expr(0) + } + { + p.SetState(1830) + p.Match(SQLiteParserFOLLOWING_) + } + + case 3: + p.EnterOuterAlt(localctx, 3) + { + p.SetState(1832) + p.Match(SQLiteParserCURRENT_) + } + { 
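+			// CURRENT was consumed above; ROW completes the CURRENT ROW frame bound.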
+ p.SetState(1833) + p.Match(SQLiteParserROW_) + } + + case 4: + p.EnterOuterAlt(localctx, 4) + { + p.SetState(1834) + p.Match(SQLiteParserUNBOUNDED_) + } + { + p.SetState(1835) + p.Match(SQLiteParserPRECEDING_) + } + + } + + return localctx +} + +// IFrame_rightContext is an interface to support dynamic dispatch. +type IFrame_rightContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Expr() IExprContext + PRECEDING_() antlr.TerminalNode + FOLLOWING_() antlr.TerminalNode + CURRENT_() antlr.TerminalNode + ROW_() antlr.TerminalNode + UNBOUNDED_() antlr.TerminalNode + + // IsFrame_rightContext differentiates from other interfaces. + IsFrame_rightContext() +} + +type Frame_rightContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyFrame_rightContext() *Frame_rightContext { + var p = new(Frame_rightContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_frame_right + return p +} + +func (*Frame_rightContext) IsFrame_rightContext() {} + +func NewFrame_rightContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Frame_rightContext { + var p = new(Frame_rightContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_frame_right + + return p +} + +func (s *Frame_rightContext) GetParser() antlr.Parser { return s.parser } + +func (s *Frame_rightContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Frame_rightContext) PRECEDING_() antlr.TerminalNode { + return s.GetToken(SQLiteParserPRECEDING_, 0) +} + +func (s *Frame_rightContext) FOLLOWING_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFOLLOWING_, 0) +} + +func (s *Frame_rightContext) CURRENT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCURRENT_, 0) +} + +func (s *Frame_rightContext) ROW_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROW_, 0) +} + +func (s *Frame_rightContext) UNBOUNDED_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUNBOUNDED_, 0) +} + +func (s *Frame_rightContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Frame_rightContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Frame_rightContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterFrame_right(s) + } +} + +func (s *Frame_rightContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitFrame_right(s) + } +} + +func (p *SQLiteParser) Frame_right() (localctx IFrame_rightContext) { + this := p + _ = this + + localctx = NewFrame_rightContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 146, SQLiteParserRULE_frame_right) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1848) + p.GetErrorHandler().Sync(p) + switch 
p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 275, p.GetParserRuleContext()) { + case 1: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1838) + p.expr(0) + } + { + p.SetState(1839) + p.Match(SQLiteParserPRECEDING_) + } + + case 2: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(1841) + p.expr(0) + } + { + p.SetState(1842) + p.Match(SQLiteParserFOLLOWING_) + } + + case 3: + p.EnterOuterAlt(localctx, 3) + { + p.SetState(1844) + p.Match(SQLiteParserCURRENT_) + } + { + p.SetState(1845) + p.Match(SQLiteParserROW_) + } + + case 4: + p.EnterOuterAlt(localctx, 4) + { + p.SetState(1846) + p.Match(SQLiteParserUNBOUNDED_) + } + { + p.SetState(1847) + p.Match(SQLiteParserFOLLOWING_) + } + + } + + return localctx +} + +// IFrame_singleContext is an interface to support dynamic dispatch. +type IFrame_singleContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Expr() IExprContext + PRECEDING_() antlr.TerminalNode + UNBOUNDED_() antlr.TerminalNode + CURRENT_() antlr.TerminalNode + ROW_() antlr.TerminalNode + + // IsFrame_singleContext differentiates from other interfaces. + IsFrame_singleContext() +} + +type Frame_singleContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyFrame_singleContext() *Frame_singleContext { + var p = new(Frame_singleContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_frame_single + return p +} + +func (*Frame_singleContext) IsFrame_singleContext() {} + +func NewFrame_singleContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Frame_singleContext { + var p = new(Frame_singleContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_frame_single + + return p +} + +func (s *Frame_singleContext) GetParser() antlr.Parser { return s.parser } + +func (s *Frame_singleContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Frame_singleContext) PRECEDING_() antlr.TerminalNode { + return s.GetToken(SQLiteParserPRECEDING_, 0) +} + +func (s *Frame_singleContext) UNBOUNDED_() antlr.TerminalNode { + return s.GetToken(SQLiteParserUNBOUNDED_, 0) +} + +func (s *Frame_singleContext) CURRENT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCURRENT_, 0) +} + +func (s *Frame_singleContext) ROW_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROW_, 0) +} + +func (s *Frame_singleContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Frame_singleContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Frame_singleContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterFrame_single(s) + } +} + +func (s *Frame_singleContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitFrame_single(s) + } +} + +func (p *SQLiteParser) Frame_single() (localctx IFrame_singleContext) { + this := p + _ = this + + localctx = NewFrame_singleContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 148, SQLiteParserRULE_frame_single) + + defer func() { + 
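+		// Always pop the rule context, even when a recognition error is recovered below.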
p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1857) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 276, p.GetParserRuleContext()) { + case 1: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1850) + p.expr(0) + } + { + p.SetState(1851) + p.Match(SQLiteParserPRECEDING_) + } + + case 2: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(1853) + p.Match(SQLiteParserUNBOUNDED_) + } + { + p.SetState(1854) + p.Match(SQLiteParserPRECEDING_) + } + + case 3: + p.EnterOuterAlt(localctx, 3) + { + p.SetState(1855) + p.Match(SQLiteParserCURRENT_) + } + { + p.SetState(1856) + p.Match(SQLiteParserROW_) + } + + } + + return localctx +} + +// IWindow_functionContext is an interface to support dynamic dispatch. +type IWindow_functionContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + AllOPEN_PAR() []antlr.TerminalNode + OPEN_PAR(i int) antlr.TerminalNode + Expr() IExprContext + AllCLOSE_PAR() []antlr.TerminalNode + CLOSE_PAR(i int) antlr.TerminalNode + OVER_() antlr.TerminalNode + Order_by_expr_asc_desc() IOrder_by_expr_asc_descContext + FIRST_VALUE_() antlr.TerminalNode + LAST_VALUE_() antlr.TerminalNode + Partition_by() IPartition_byContext + Frame_clause() IFrame_clauseContext + CUME_DIST_() antlr.TerminalNode + PERCENT_RANK_() antlr.TerminalNode + Order_by_expr() IOrder_by_exprContext + DENSE_RANK_() antlr.TerminalNode + RANK_() antlr.TerminalNode + ROW_NUMBER_() antlr.TerminalNode + LAG_() antlr.TerminalNode + LEAD_() antlr.TerminalNode + Offset() IOffsetContext + Default_value() IDefault_valueContext + NTH_VALUE_() antlr.TerminalNode + COMMA() antlr.TerminalNode + Signed_number() ISigned_numberContext + NTILE_() antlr.TerminalNode + + // IsWindow_functionContext differentiates from other interfaces. 
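+	// (Marker method only; the generated code relies on it for type assertions.)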
+ IsWindow_functionContext() +} + +type Window_functionContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyWindow_functionContext() *Window_functionContext { + var p = new(Window_functionContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_window_function + return p +} + +func (*Window_functionContext) IsWindow_functionContext() {} + +func NewWindow_functionContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Window_functionContext { + var p = new(Window_functionContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_window_function + + return p +} + +func (s *Window_functionContext) GetParser() antlr.Parser { return s.parser } + +func (s *Window_functionContext) AllOPEN_PAR() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserOPEN_PAR) +} + +func (s *Window_functionContext) OPEN_PAR(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, i) +} + +func (s *Window_functionContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Window_functionContext) AllCLOSE_PAR() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCLOSE_PAR) +} + +func (s *Window_functionContext) CLOSE_PAR(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, i) +} + +func (s *Window_functionContext) OVER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserOVER_, 0) +} + +func (s *Window_functionContext) Order_by_expr_asc_desc() IOrder_by_expr_asc_descContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOrder_by_expr_asc_descContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IOrder_by_expr_asc_descContext) +} + +func (s *Window_functionContext) FIRST_VALUE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserFIRST_VALUE_, 0) +} + +func (s *Window_functionContext) LAST_VALUE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserLAST_VALUE_, 0) +} + +func (s *Window_functionContext) Partition_by() IPartition_byContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IPartition_byContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IPartition_byContext) +} + +func (s *Window_functionContext) Frame_clause() IFrame_clauseContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFrame_clauseContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IFrame_clauseContext) +} + +func (s *Window_functionContext) CUME_DIST_() antlr.TerminalNode { + return s.GetToken(SQLiteParserCUME_DIST_, 0) +} + +func (s *Window_functionContext) PERCENT_RANK_() antlr.TerminalNode { + return s.GetToken(SQLiteParserPERCENT_RANK_, 0) +} + +func (s *Window_functionContext) Order_by_expr() IOrder_by_exprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOrder_by_exprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IOrder_by_exprContext) +} + +func (s *Window_functionContext) DENSE_RANK_() antlr.TerminalNode { 
+ return s.GetToken(SQLiteParserDENSE_RANK_, 0) +} + +func (s *Window_functionContext) RANK_() antlr.TerminalNode { + return s.GetToken(SQLiteParserRANK_, 0) +} + +func (s *Window_functionContext) ROW_NUMBER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserROW_NUMBER_, 0) +} + +func (s *Window_functionContext) LAG_() antlr.TerminalNode { + return s.GetToken(SQLiteParserLAG_, 0) +} + +func (s *Window_functionContext) LEAD_() antlr.TerminalNode { + return s.GetToken(SQLiteParserLEAD_, 0) +} + +func (s *Window_functionContext) Offset() IOffsetContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOffsetContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IOffsetContext) +} + +func (s *Window_functionContext) Default_value() IDefault_valueContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IDefault_valueContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IDefault_valueContext) +} + +func (s *Window_functionContext) NTH_VALUE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNTH_VALUE_, 0) +} + +func (s *Window_functionContext) COMMA() antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, 0) +} + +func (s *Window_functionContext) Signed_number() ISigned_numberContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISigned_numberContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISigned_numberContext) +} + +func (s *Window_functionContext) NTILE_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNTILE_, 0) +} + +func (s *Window_functionContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Window_functionContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Window_functionContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterWindow_function(s) + } +} + +func (s *Window_functionContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitWindow_function(s) + } +} + +func (p *SQLiteParser) Window_function() (localctx IWindow_functionContext) { + this := p + _ = this + + localctx = NewWindow_functionContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 150, SQLiteParserRULE_window_function) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1944) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserFIRST_VALUE_, SQLiteParserLAST_VALUE_: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1859) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserFIRST_VALUE_ || _la == SQLiteParserLAST_VALUE_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(1860) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1861) + p.expr(0) + } + { + p.SetState(1862) + p.Match(SQLiteParserCLOSE_PAR) + } + { + p.SetState(1863) + p.Match(SQLiteParserOVER_) + } + { + 
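+			// OVER was consumed above; parse the parenthesized window specification.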
p.SetState(1864) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1866) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserPARTITION_ { + { + p.SetState(1865) + p.Partition_by() + } + + } + { + p.SetState(1868) + p.Order_by_expr_asc_desc() + } + p.SetState(1870) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if (int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&2251799880794113) != 0 { + { + p.SetState(1869) + p.Frame_clause() + } + + } + { + p.SetState(1872) + p.Match(SQLiteParserCLOSE_PAR) + } + + case SQLiteParserCUME_DIST_, SQLiteParserPERCENT_RANK_: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(1874) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserCUME_DIST_ || _la == SQLiteParserPERCENT_RANK_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(1875) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1876) + p.Match(SQLiteParserCLOSE_PAR) + } + { + p.SetState(1877) + p.Match(SQLiteParserOVER_) + } + { + p.SetState(1878) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1880) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserPARTITION_ { + { + p.SetState(1879) + p.Partition_by() + } + + } + p.SetState(1883) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserORDER_ { + { + p.SetState(1882) + p.Order_by_expr() + } + + } + { + p.SetState(1885) + p.Match(SQLiteParserCLOSE_PAR) + } + + case SQLiteParserDENSE_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_: + p.EnterOuterAlt(localctx, 3) + { + p.SetState(1886) + _la = p.GetTokenStream().LA(1) + + if !((int64((_la-160)) & ^0x3f) == 0 && ((int64(1)<<(_la-160))&385) != 0) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(1887) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1888) + p.Match(SQLiteParserCLOSE_PAR) + } + { + p.SetState(1889) + p.Match(SQLiteParserOVER_) + } + { + p.SetState(1890) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1892) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserPARTITION_ { + { + p.SetState(1891) + p.Partition_by() + } + + } + { + p.SetState(1894) + p.Order_by_expr_asc_desc() + } + { + p.SetState(1895) + p.Match(SQLiteParserCLOSE_PAR) + } + + case SQLiteParserLAG_, SQLiteParserLEAD_: + p.EnterOuterAlt(localctx, 4) + { + p.SetState(1897) + _la = p.GetTokenStream().LA(1) + + if !(_la == SQLiteParserLAG_ || _la == SQLiteParserLEAD_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(1898) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1899) + p.expr(0) + } + p.SetState(1901) + p.GetErrorHandler().Sync(p) + + if p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 282, p.GetParserRuleContext()) == 1 { + { + p.SetState(1900) + p.Offset() + } + + } + p.SetState(1904) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserCOMMA { + { + p.SetState(1903) + p.Default_value() + } + + } + { + p.SetState(1906) + p.Match(SQLiteParserCLOSE_PAR) + } + { + p.SetState(1907) + p.Match(SQLiteParserOVER_) + } + { + p.SetState(1908) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1910) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserPARTITION_ { + { + p.SetState(1909) + p.Partition_by() + } + + } + { + p.SetState(1912) + 
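+			// LAG/LEAD always carry an ORDER BY inside the OVER clause in this grammar.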
p.Order_by_expr_asc_desc() + } + { + p.SetState(1913) + p.Match(SQLiteParserCLOSE_PAR) + } + + case SQLiteParserNTH_VALUE_: + p.EnterOuterAlt(localctx, 5) + { + p.SetState(1915) + p.Match(SQLiteParserNTH_VALUE_) + } + { + p.SetState(1916) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1917) + p.expr(0) + } + { + p.SetState(1918) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1919) + p.Signed_number() + } + { + p.SetState(1920) + p.Match(SQLiteParserCLOSE_PAR) + } + { + p.SetState(1921) + p.Match(SQLiteParserOVER_) + } + { + p.SetState(1922) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1924) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserPARTITION_ { + { + p.SetState(1923) + p.Partition_by() + } + + } + { + p.SetState(1926) + p.Order_by_expr_asc_desc() + } + p.SetState(1928) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if (int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&2251799880794113) != 0 { + { + p.SetState(1927) + p.Frame_clause() + } + + } + { + p.SetState(1930) + p.Match(SQLiteParserCLOSE_PAR) + } + + case SQLiteParserNTILE_: + p.EnterOuterAlt(localctx, 6) + { + p.SetState(1932) + p.Match(SQLiteParserNTILE_) + } + { + p.SetState(1933) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(1934) + p.expr(0) + } + { + p.SetState(1935) + p.Match(SQLiteParserCLOSE_PAR) + } + { + p.SetState(1936) + p.Match(SQLiteParserOVER_) + } + { + p.SetState(1937) + p.Match(SQLiteParserOPEN_PAR) + } + p.SetState(1939) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserPARTITION_ { + { + p.SetState(1938) + p.Partition_by() + } + + } + { + p.SetState(1941) + p.Order_by_expr_asc_desc() + } + { + p.SetState(1942) + p.Match(SQLiteParserCLOSE_PAR) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + return localctx +} + +// IOffsetContext is an interface to support dynamic dispatch. +type IOffsetContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + COMMA() antlr.TerminalNode + Signed_number() ISigned_numberContext + + // IsOffsetContext differentiates from other interfaces. 
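+	// (An offset is the ", signed_number" tail that LAG and LEAD may take.)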
+ IsOffsetContext() +} + +type OffsetContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyOffsetContext() *OffsetContext { + var p = new(OffsetContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_offset + return p +} + +func (*OffsetContext) IsOffsetContext() {} + +func NewOffsetContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OffsetContext { + var p = new(OffsetContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_offset + + return p +} + +func (s *OffsetContext) GetParser() antlr.Parser { return s.parser } + +func (s *OffsetContext) COMMA() antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, 0) +} + +func (s *OffsetContext) Signed_number() ISigned_numberContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISigned_numberContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISigned_numberContext) +} + +func (s *OffsetContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *OffsetContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *OffsetContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterOffset(s) + } +} + +func (s *OffsetContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitOffset(s) + } +} + +func (p *SQLiteParser) Offset() (localctx IOffsetContext) { + this := p + _ = this + + localctx = NewOffsetContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 152, SQLiteParserRULE_offset) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1946) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1947) + p.Signed_number() + } + + return localctx +} + +// IDefault_valueContext is an interface to support dynamic dispatch. +type IDefault_valueContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + COMMA() antlr.TerminalNode + Signed_number() ISigned_numberContext + + // IsDefault_valueContext differentiates from other interfaces. 
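+	// (A default value is the trailing ", signed_number" in LAG/LEAD calls.)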
+ IsDefault_valueContext() +} + +type Default_valueContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyDefault_valueContext() *Default_valueContext { + var p = new(Default_valueContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_default_value + return p +} + +func (*Default_valueContext) IsDefault_valueContext() {} + +func NewDefault_valueContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Default_valueContext { + var p = new(Default_valueContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_default_value + + return p +} + +func (s *Default_valueContext) GetParser() antlr.Parser { return s.parser } + +func (s *Default_valueContext) COMMA() antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, 0) +} + +func (s *Default_valueContext) Signed_number() ISigned_numberContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISigned_numberContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISigned_numberContext) +} + +func (s *Default_valueContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Default_valueContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Default_valueContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterDefault_value(s) + } +} + +func (s *Default_valueContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitDefault_value(s) + } +} + +func (p *SQLiteParser) Default_value() (localctx IDefault_valueContext) { + this := p + _ = this + + localctx = NewDefault_valueContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 154, SQLiteParserRULE_default_value) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1949) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1950) + p.Signed_number() + } + + return localctx +} + +// IPartition_byContext is an interface to support dynamic dispatch. +type IPartition_byContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + PARTITION_() antlr.TerminalNode + BY_() antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + + // IsPartition_byContext differentiates from other interfaces. 
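+	// (Covers the PARTITION BY expr+ fragment of a window definition.)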
+ IsPartition_byContext() +} + +type Partition_byContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyPartition_byContext() *Partition_byContext { + var p = new(Partition_byContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_partition_by + return p +} + +func (*Partition_byContext) IsPartition_byContext() {} + +func NewPartition_byContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Partition_byContext { + var p = new(Partition_byContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_partition_by + + return p +} + +func (s *Partition_byContext) GetParser() antlr.Parser { return s.parser } + +func (s *Partition_byContext) PARTITION_() antlr.TerminalNode { + return s.GetToken(SQLiteParserPARTITION_, 0) +} + +func (s *Partition_byContext) BY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserBY_, 0) +} + +func (s *Partition_byContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Partition_byContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Partition_byContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Partition_byContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Partition_byContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterPartition_by(s) + } +} + +func (s *Partition_byContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitPartition_by(s) + } +} + +func (p *SQLiteParser) Partition_by() (localctx IPartition_byContext) { + this := p + _ = this + + localctx = NewPartition_byContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 156, SQLiteParserRULE_partition_by) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1952) + p.Match(SQLiteParserPARTITION_) + } + { + p.SetState(1953) + p.Match(SQLiteParserBY_) + } + p.SetState(1955) + p.GetErrorHandler().Sync(p) + _alt = 1 + for ok := true; ok; ok = _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + switch _alt { + case 1: + { + p.SetState(1954) + p.expr(0) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + p.SetState(1957) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 289, p.GetParserRuleContext()) + } + + return localctx +} + +// IOrder_by_exprContext is an interface to support dynamic dispatch. 
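+// It represents an ORDER BY keyword pair followed by one or more expressions.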
+type IOrder_by_exprContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + ORDER_() antlr.TerminalNode + BY_() antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + + // IsOrder_by_exprContext differentiates from other interfaces. + IsOrder_by_exprContext() +} + +type Order_by_exprContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyOrder_by_exprContext() *Order_by_exprContext { + var p = new(Order_by_exprContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_order_by_expr + return p +} + +func (*Order_by_exprContext) IsOrder_by_exprContext() {} + +func NewOrder_by_exprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Order_by_exprContext { + var p = new(Order_by_exprContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_order_by_expr + + return p +} + +func (s *Order_by_exprContext) GetParser() antlr.Parser { return s.parser } + +func (s *Order_by_exprContext) ORDER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserORDER_, 0) +} + +func (s *Order_by_exprContext) BY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserBY_, 0) +} + +func (s *Order_by_exprContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Order_by_exprContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Order_by_exprContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Order_by_exprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Order_by_exprContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterOrder_by_expr(s) + } +} + +func (s *Order_by_exprContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitOrder_by_expr(s) + } +} + +func (p *SQLiteParser) Order_by_expr() (localctx IOrder_by_exprContext) { + this := p + _ = this + + localctx = NewOrder_by_exprContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 158, SQLiteParserRULE_order_by_expr) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1959) + p.Match(SQLiteParserORDER_) + } + { + p.SetState(1960) + p.Match(SQLiteParserBY_) + } + p.SetState(1962) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for ok := true; ok; ok = ((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&-33552632) != 0) || ((int64((_la-64)) & ^0x3f) == 0 && 
((int64(1)<<(_la-64))&-1152921504606846977) != 0) || ((int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&4476578029606273023) != 0) { + { + p.SetState(1961) + p.expr(0) + } + + p.SetState(1964) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + return localctx +} + +// IOrder_by_expr_asc_descContext is an interface to support dynamic dispatch. +type IOrder_by_expr_asc_descContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + ORDER_() antlr.TerminalNode + BY_() antlr.TerminalNode + Expr_asc_desc() IExpr_asc_descContext + + // IsOrder_by_expr_asc_descContext differentiates from other interfaces. + IsOrder_by_expr_asc_descContext() +} + +type Order_by_expr_asc_descContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyOrder_by_expr_asc_descContext() *Order_by_expr_asc_descContext { + var p = new(Order_by_expr_asc_descContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_order_by_expr_asc_desc + return p +} + +func (*Order_by_expr_asc_descContext) IsOrder_by_expr_asc_descContext() {} + +func NewOrder_by_expr_asc_descContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Order_by_expr_asc_descContext { + var p = new(Order_by_expr_asc_descContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_order_by_expr_asc_desc + + return p +} + +func (s *Order_by_expr_asc_descContext) GetParser() antlr.Parser { return s.parser } + +func (s *Order_by_expr_asc_descContext) ORDER_() antlr.TerminalNode { + return s.GetToken(SQLiteParserORDER_, 0) +} + +func (s *Order_by_expr_asc_descContext) BY_() antlr.TerminalNode { + return s.GetToken(SQLiteParserBY_, 0) +} + +func (s *Order_by_expr_asc_descContext) Expr_asc_desc() IExpr_asc_descContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExpr_asc_descContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExpr_asc_descContext) +} + +func (s *Order_by_expr_asc_descContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Order_by_expr_asc_descContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Order_by_expr_asc_descContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterOrder_by_expr_asc_desc(s) + } +} + +func (s *Order_by_expr_asc_descContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitOrder_by_expr_asc_desc(s) + } +} + +func (p *SQLiteParser) Order_by_expr_asc_desc() (localctx IOrder_by_expr_asc_descContext) { + this := p + _ = this + + localctx = NewOrder_by_expr_asc_descContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 160, SQLiteParserRULE_order_by_expr_asc_desc) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1966) + p.Match(SQLiteParserORDER_) + } + { + p.SetState(1967) + 
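+		// ORDER was consumed above; match BY, then the expression list with optional ASC/DESC.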
p.Match(SQLiteParserBY_) + } + { + p.SetState(1968) + p.Expr_asc_desc() + } + + return localctx +} + +// IExpr_asc_descContext is an interface to support dynamic dispatch. +type IExpr_asc_descContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + AllExpr() []IExprContext + Expr(i int) IExprContext + AllAsc_desc() []IAsc_descContext + Asc_desc(i int) IAsc_descContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsExpr_asc_descContext differentiates from other interfaces. + IsExpr_asc_descContext() +} + +type Expr_asc_descContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyExpr_asc_descContext() *Expr_asc_descContext { + var p = new(Expr_asc_descContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_expr_asc_desc + return p +} + +func (*Expr_asc_descContext) IsExpr_asc_descContext() {} + +func NewExpr_asc_descContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Expr_asc_descContext { + var p = new(Expr_asc_descContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_expr_asc_desc + + return p +} + +func (s *Expr_asc_descContext) GetParser() antlr.Parser { return s.parser } + +func (s *Expr_asc_descContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *Expr_asc_descContext) Expr(i int) IExprContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Expr_asc_descContext) AllAsc_desc() []IAsc_descContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IAsc_descContext); ok { + len++ + } + } + + tst := make([]IAsc_descContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IAsc_descContext); ok { + tst[i] = t.(IAsc_descContext) + i++ + } + } + + return tst +} + +func (s *Expr_asc_descContext) Asc_desc(i int) IAsc_descContext { + var t antlr.RuleContext + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAsc_descContext); ok { + if j == i { + t = ctx.(antlr.RuleContext) + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IAsc_descContext) +} + +func (s *Expr_asc_descContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(SQLiteParserCOMMA) +} + +func (s *Expr_asc_descContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(SQLiteParserCOMMA, i) +} + +func (s *Expr_asc_descContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Expr_asc_descContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Expr_asc_descContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterExpr_asc_desc(s) + } +} + +func (s *Expr_asc_descContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := 
listener.(SQLiteParserListener); ok { + listenerT.ExitExpr_asc_desc(s) + } +} + +func (p *SQLiteParser) Expr_asc_desc() (localctx IExpr_asc_descContext) { + this := p + _ = this + + localctx = NewExpr_asc_descContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 162, SQLiteParserRULE_expr_asc_desc) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1970) + p.expr(0) + } + p.SetState(1972) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserASC_ || _la == SQLiteParserDESC_ { + { + p.SetState(1971) + p.Asc_desc() + } + + } + p.SetState(1981) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + for _la == SQLiteParserCOMMA { + { + p.SetState(1974) + p.Match(SQLiteParserCOMMA) + } + { + p.SetState(1975) + p.expr(0) + } + p.SetState(1977) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + + if _la == SQLiteParserASC_ || _la == SQLiteParserDESC_ { + { + p.SetState(1976) + p.Asc_desc() + } + + } + + p.SetState(1983) + p.GetErrorHandler().Sync(p) + _la = p.GetTokenStream().LA(1) + } + + return localctx +} + +// IInitial_selectContext is an interface to support dynamic dispatch. +type IInitial_selectContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Select_stmt() ISelect_stmtContext + + // IsInitial_selectContext differentiates from other interfaces. + IsInitial_selectContext() +} + +type Initial_selectContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyInitial_selectContext() *Initial_selectContext { + var p = new(Initial_selectContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_initial_select + return p +} + +func (*Initial_selectContext) IsInitial_selectContext() {} + +func NewInitial_selectContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Initial_selectContext { + var p = new(Initial_selectContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_initial_select + + return p +} + +func (s *Initial_selectContext) GetParser() antlr.Parser { return s.parser } + +func (s *Initial_selectContext) Select_stmt() ISelect_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISelect_stmtContext) +} + +func (s *Initial_selectContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Initial_selectContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Initial_selectContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterInitial_select(s) + } +} + +func (s *Initial_selectContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitInitial_select(s) + } +} + +func (p *SQLiteParser) Initial_select() (localctx 
IInitial_selectContext) { + this := p + _ = this + + localctx = NewInitial_selectContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 164, SQLiteParserRULE_initial_select) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1984) + p.Select_stmt() + } + + return localctx +} + +// IRecursive_selectContext is an interface to support dynamic dispatch. +type IRecursive_selectContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Select_stmt() ISelect_stmtContext + + // IsRecursive_selectContext differentiates from other interfaces. + IsRecursive_selectContext() +} + +type Recursive_selectContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyRecursive_selectContext() *Recursive_selectContext { + var p = new(Recursive_selectContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_recursive_select + return p +} + +func (*Recursive_selectContext) IsRecursive_selectContext() {} + +func NewRecursive_selectContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Recursive_selectContext { + var p = new(Recursive_selectContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_recursive_select + + return p +} + +func (s *Recursive_selectContext) GetParser() antlr.Parser { return s.parser } + +func (s *Recursive_selectContext) Select_stmt() ISelect_stmtContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ISelect_stmtContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(ISelect_stmtContext) +} + +func (s *Recursive_selectContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Recursive_selectContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Recursive_selectContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterRecursive_select(s) + } +} + +func (s *Recursive_selectContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitRecursive_select(s) + } +} + +func (p *SQLiteParser) Recursive_select() (localctx IRecursive_selectContext) { + this := p + _ = this + + localctx = NewRecursive_selectContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 166, SQLiteParserRULE_recursive_select) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1986) + p.Select_stmt() + } + + return localctx +} + +// IUnary_operatorContext is an interface to support dynamic dispatch. +type IUnary_operatorContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. 
+ GetParser() antlr.Parser + + // Getter signatures + MINUS() antlr.TerminalNode + PLUS() antlr.TerminalNode + TILDE() antlr.TerminalNode + NOT_() antlr.TerminalNode + + // IsUnary_operatorContext differentiates from other interfaces. + IsUnary_operatorContext() +} + +type Unary_operatorContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyUnary_operatorContext() *Unary_operatorContext { + var p = new(Unary_operatorContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_unary_operator + return p +} + +func (*Unary_operatorContext) IsUnary_operatorContext() {} + +func NewUnary_operatorContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Unary_operatorContext { + var p = new(Unary_operatorContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_unary_operator + + return p +} + +func (s *Unary_operatorContext) GetParser() antlr.Parser { return s.parser } + +func (s *Unary_operatorContext) MINUS() antlr.TerminalNode { + return s.GetToken(SQLiteParserMINUS, 0) +} + +func (s *Unary_operatorContext) PLUS() antlr.TerminalNode { + return s.GetToken(SQLiteParserPLUS, 0) +} + +func (s *Unary_operatorContext) TILDE() antlr.TerminalNode { + return s.GetToken(SQLiteParserTILDE, 0) +} + +func (s *Unary_operatorContext) NOT_() antlr.TerminalNode { + return s.GetToken(SQLiteParserNOT_, 0) +} + +func (s *Unary_operatorContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Unary_operatorContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Unary_operatorContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterUnary_operator(s) + } +} + +func (s *Unary_operatorContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitUnary_operator(s) + } +} + +func (p *SQLiteParser) Unary_operator() (localctx IUnary_operatorContext) { + this := p + _ = this + + localctx = NewUnary_operatorContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 168, SQLiteParserRULE_unary_operator) + var _la int + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1988) + _la = p.GetTokenStream().LA(1) + + if !(((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&1792) != 0) || _la == SQLiteParserNOT_) { + p.GetErrorHandler().RecoverInline(p) + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + + return localctx +} + +// IError_messageContext is an interface to support dynamic dispatch. +type IError_messageContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + STRING_LITERAL() antlr.TerminalNode + + // IsError_messageContext differentiates from other interfaces. 
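+	// (An error message is a single string-literal token.)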
+ IsError_messageContext() +} + +type Error_messageContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyError_messageContext() *Error_messageContext { + var p = new(Error_messageContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_error_message + return p +} + +func (*Error_messageContext) IsError_messageContext() {} + +func NewError_messageContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Error_messageContext { + var p = new(Error_messageContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_error_message + + return p +} + +func (s *Error_messageContext) GetParser() antlr.Parser { return s.parser } + +func (s *Error_messageContext) STRING_LITERAL() antlr.TerminalNode { + return s.GetToken(SQLiteParserSTRING_LITERAL, 0) +} + +func (s *Error_messageContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Error_messageContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Error_messageContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterError_message(s) + } +} + +func (s *Error_messageContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitError_message(s) + } +} + +func (p *SQLiteParser) Error_message() (localctx IError_messageContext) { + this := p + _ = this + + localctx = NewError_messageContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 170, SQLiteParserRULE_error_message) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1990) + p.Match(SQLiteParserSTRING_LITERAL) + } + + return localctx +} + +// IModule_argumentContext is an interface to support dynamic dispatch. +type IModule_argumentContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Expr() IExprContext + Column_def() IColumn_defContext + + // IsModule_argumentContext differentiates from other interfaces. 
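+	// (A module argument is either an expression or a column definition.)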
+ IsModule_argumentContext() +} + +type Module_argumentContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyModule_argumentContext() *Module_argumentContext { + var p = new(Module_argumentContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_module_argument + return p +} + +func (*Module_argumentContext) IsModule_argumentContext() {} + +func NewModule_argumentContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Module_argumentContext { + var p = new(Module_argumentContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_module_argument + + return p +} + +func (s *Module_argumentContext) GetParser() antlr.Parser { return s.parser } + +func (s *Module_argumentContext) Expr() IExprContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *Module_argumentContext) Column_def() IColumn_defContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IColumn_defContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IColumn_defContext) +} + +func (s *Module_argumentContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Module_argumentContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Module_argumentContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterModule_argument(s) + } +} + +func (s *Module_argumentContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitModule_argument(s) + } +} + +func (p *SQLiteParser) Module_argument() (localctx IModule_argumentContext) { + this := p + _ = this + + localctx = NewModule_argumentContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 172, SQLiteParserRULE_module_argument) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(1994) + p.GetErrorHandler().Sync(p) + switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 294, p.GetParserRuleContext()) { + case 1: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(1992) + p.expr(0) + } + + case 2: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(1993) + p.Column_def() + } + + } + + return localctx +} + +// IColumn_aliasContext is an interface to support dynamic dispatch. +type IColumn_aliasContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + IDENTIFIER() antlr.TerminalNode + STRING_LITERAL() antlr.TerminalNode + + // IsColumn_aliasContext differentiates from other interfaces. 
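Unlike the single-token rules around it, `Module_argument` needs `AdaptivePredict` (decision 294) to choose an alternative, since an expression and a column definition can begin with the same tokens. After a successful parse exactly one of the context's child getters returns non-nil, which callers can use to tell the alternatives apart. A hedged sketch against the interfaces defined above (the helper name is invented, not part of the generated API):

```go
// describeModuleArgument is a hypothetical helper: it probes the generated
// getters, of which exactly one is non-nil for a successfully parsed rule.
func describeModuleArgument(ctx IModule_argumentContext) string {
	if ctx.Expr() != nil {
		return "expression argument"
	}
	if ctx.Column_def() != nil {
		return "column definition argument"
	}
	return "unparsed" // only reachable after a syntax error
}
```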
+// IColumn_aliasContext is an interface to support dynamic dispatch.
+type IColumn_aliasContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	IDENTIFIER() antlr.TerminalNode
+	STRING_LITERAL() antlr.TerminalNode
+
+	// IsColumn_aliasContext differentiates from other interfaces.
+	IsColumn_aliasContext()
+}
+
+type Column_aliasContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyColumn_aliasContext() *Column_aliasContext {
+	var p = new(Column_aliasContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_column_alias
+	return p
+}
+
+func (*Column_aliasContext) IsColumn_aliasContext() {}
+
+func NewColumn_aliasContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Column_aliasContext {
+	var p = new(Column_aliasContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_column_alias
+
+	return p
+}
+
+func (s *Column_aliasContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Column_aliasContext) IDENTIFIER() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserIDENTIFIER, 0)
+}
+
+func (s *Column_aliasContext) STRING_LITERAL() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserSTRING_LITERAL, 0)
+}
+
+func (s *Column_aliasContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Column_aliasContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Column_aliasContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterColumn_alias(s)
+	}
+}
+
+func (s *Column_aliasContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitColumn_alias(s)
+	}
+}
+
+func (p *SQLiteParser) Column_alias() (localctx IColumn_aliasContext) {
+	this := p
+	_ = this
+
+	localctx = NewColumn_aliasContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 174, SQLiteParserRULE_column_alias)
+	var _la int
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(1996)
+		_la = p.GetTokenStream().LA(1)
+
+		if !(_la == SQLiteParserIDENTIFIER || _la == SQLiteParserSTRING_LITERAL) {
+			p.GetErrorHandler().RecoverInline(p)
+		} else {
+			p.GetErrorHandler().ReportMatch(p)
+			p.Consume()
+		}
+	}
+
+	return localctx
+}
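`Column_alias` accepts either an IDENTIFIER or a STRING_LITERAL, mirroring SQLite's historical tolerance for string literals in identifier positions. Since the rule matches a single terminal, the simplest way to read the alias is `GetText`, which the context inherits from the ANTLR runtime; a small sketch (the helper name is hypothetical):

```go
// aliasText returns the raw alias token text. Note that quotes are kept
// for STRING_LITERAL aliases, so callers needing the bare name must strip
// them separately.
func aliasText(ctx IColumn_aliasContext) string {
	return ctx.GetText() // single terminal: IDENTIFIER or STRING_LITERAL
}
```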
+// IKeywordContext is an interface to support dynamic dispatch.
+type IKeywordContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	ABORT_() antlr.TerminalNode
+	ACTION_() antlr.TerminalNode
+	ADD_() antlr.TerminalNode
+	AFTER_() antlr.TerminalNode
+	ALL_() antlr.TerminalNode
+	ALTER_() antlr.TerminalNode
+	ANALYZE_() antlr.TerminalNode
+	AND_() antlr.TerminalNode
+	AS_() antlr.TerminalNode
+	ASC_() antlr.TerminalNode
+	ATTACH_() antlr.TerminalNode
+	AUTOINCREMENT_() antlr.TerminalNode
+	BEFORE_() antlr.TerminalNode
+	BEGIN_() antlr.TerminalNode
+	BETWEEN_() antlr.TerminalNode
+	BY_() antlr.TerminalNode
+	CASCADE_() antlr.TerminalNode
+	CASE_() antlr.TerminalNode
+	CAST_() antlr.TerminalNode
+	CHECK_() antlr.TerminalNode
+	COLLATE_() antlr.TerminalNode
+	COLUMN_() antlr.TerminalNode
+	COMMIT_() antlr.TerminalNode
+	CONFLICT_() antlr.TerminalNode
+	CONSTRAINT_() antlr.TerminalNode
+	CREATE_() antlr.TerminalNode
+	CROSS_() antlr.TerminalNode
+	CURRENT_DATE_() antlr.TerminalNode
+	CURRENT_TIME_() antlr.TerminalNode
+	CURRENT_TIMESTAMP_() antlr.TerminalNode
+	DATABASE_() antlr.TerminalNode
+	DEFAULT_() antlr.TerminalNode
+	DEFERRABLE_() antlr.TerminalNode
+	DEFERRED_() antlr.TerminalNode
+	DELETE_() antlr.TerminalNode
+	DESC_() antlr.TerminalNode
+	DETACH_() antlr.TerminalNode
+	DISTINCT_() antlr.TerminalNode
+	DROP_() antlr.TerminalNode
+	EACH_() antlr.TerminalNode
+	ELSE_() antlr.TerminalNode
+	END_() antlr.TerminalNode
+	ESCAPE_() antlr.TerminalNode
+	EXCEPT_() antlr.TerminalNode
+	EXCLUSIVE_() antlr.TerminalNode
+	EXISTS_() antlr.TerminalNode
+	EXPLAIN_() antlr.TerminalNode
+	FAIL_() antlr.TerminalNode
+	FOR_() antlr.TerminalNode
+	FOREIGN_() antlr.TerminalNode
+	FROM_() antlr.TerminalNode
+	FULL_() antlr.TerminalNode
+	GLOB_() antlr.TerminalNode
+	GROUP_() antlr.TerminalNode
+	HAVING_() antlr.TerminalNode
+	IF_() antlr.TerminalNode
+	IGNORE_() antlr.TerminalNode
+	IMMEDIATE_() antlr.TerminalNode
+	IN_() antlr.TerminalNode
+	INDEX_() antlr.TerminalNode
+	INDEXED_() antlr.TerminalNode
+	INITIALLY_() antlr.TerminalNode
+	INNER_() antlr.TerminalNode
+	INSERT_() antlr.TerminalNode
+	INSTEAD_() antlr.TerminalNode
+	INTERSECT_() antlr.TerminalNode
+	INTO_() antlr.TerminalNode
+	IS_() antlr.TerminalNode
+	ISNULL_() antlr.TerminalNode
+	JOIN_() antlr.TerminalNode
+	KEY_() antlr.TerminalNode
+	LEFT_() antlr.TerminalNode
+	LIKE_() antlr.TerminalNode
+	LIMIT_() antlr.TerminalNode
+	MATCH_() antlr.TerminalNode
+	NATURAL_() antlr.TerminalNode
+	NO_() antlr.TerminalNode
+	NOT_() antlr.TerminalNode
+	NOTNULL_() antlr.TerminalNode
+	NULL_() antlr.TerminalNode
+	OF_() antlr.TerminalNode
+	OFFSET_() antlr.TerminalNode
+	ON_() antlr.TerminalNode
+	OR_() antlr.TerminalNode
+	ORDER_() antlr.TerminalNode
+	OUTER_() antlr.TerminalNode
+	PLAN_() antlr.TerminalNode
+	PRAGMA_() antlr.TerminalNode
+	PRIMARY_() antlr.TerminalNode
+	QUERY_() antlr.TerminalNode
+	RAISE_() antlr.TerminalNode
+	RECURSIVE_() antlr.TerminalNode
+	REFERENCES_() antlr.TerminalNode
+	REGEXP_() antlr.TerminalNode
+	REINDEX_() antlr.TerminalNode
+	RELEASE_() antlr.TerminalNode
+	RENAME_() antlr.TerminalNode
+	REPLACE_() antlr.TerminalNode
+	RESTRICT_() antlr.TerminalNode
+	RIGHT_() antlr.TerminalNode
+	ROLLBACK_() antlr.TerminalNode
+	ROW_() antlr.TerminalNode
+	ROWS_() antlr.TerminalNode
+	SAVEPOINT_() antlr.TerminalNode
+	SELECT_() antlr.TerminalNode
+	SET_() antlr.TerminalNode
+	TABLE_() antlr.TerminalNode
+	TEMP_() antlr.TerminalNode
+	TEMPORARY_() antlr.TerminalNode
+	THEN_() antlr.TerminalNode
+	TO_() antlr.TerminalNode
+	TRANSACTION_() antlr.TerminalNode
+	TRIGGER_() antlr.TerminalNode
+	UNION_() antlr.TerminalNode
+	UNIQUE_() antlr.TerminalNode
+	UPDATE_() antlr.TerminalNode
+	USING_() antlr.TerminalNode
+	VACUUM_() antlr.TerminalNode
+	VALUES_() antlr.TerminalNode
+	VIEW_() antlr.TerminalNode
+	VIRTUAL_() antlr.TerminalNode
+	WHEN_() antlr.TerminalNode
+	WHERE_() antlr.TerminalNode
+	WITH_() antlr.TerminalNode
+	WITHOUT_() antlr.TerminalNode
+	FIRST_VALUE_() antlr.TerminalNode
+	OVER_() antlr.TerminalNode
+	PARTITION_() antlr.TerminalNode
+	RANGE_() antlr.TerminalNode
+	PRECEDING_() antlr.TerminalNode
+	UNBOUNDED_() antlr.TerminalNode
+	CURRENT_() antlr.TerminalNode
+	FOLLOWING_() antlr.TerminalNode
+	CUME_DIST_() antlr.TerminalNode
+	DENSE_RANK_() antlr.TerminalNode
+	LAG_() antlr.TerminalNode
+	LAST_VALUE_() antlr.TerminalNode
+	LEAD_() antlr.TerminalNode
+	NTH_VALUE_() antlr.TerminalNode
+	NTILE_() antlr.TerminalNode
+	PERCENT_RANK_() antlr.TerminalNode
+	RANK_() antlr.TerminalNode
+	ROW_NUMBER_() antlr.TerminalNode
+	GENERATED_() antlr.TerminalNode
+	ALWAYS_() antlr.TerminalNode
+	STORED_() antlr.TerminalNode
+	TRUE_() antlr.TerminalNode
+	FALSE_() antlr.TerminalNode
+	WINDOW_() antlr.TerminalNode
+	NULLS_() antlr.TerminalNode
+	FIRST_() antlr.TerminalNode
+	LAST_() antlr.TerminalNode
+	FILTER_() antlr.TerminalNode
+	GROUPS_() antlr.TerminalNode
+	EXCLUDE_() antlr.TerminalNode
+
+	// IsKeywordContext differentiates from other interfaces.
+	IsKeywordContext()
+}
+
+type KeywordContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyKeywordContext() *KeywordContext {
+	var p = new(KeywordContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_keyword
+	return p
+}
+
+func (*KeywordContext) IsKeywordContext() {}
+
+func NewKeywordContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *KeywordContext {
+	var p = new(KeywordContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_keyword
+
+	return p
+}
+
+func (s *KeywordContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *KeywordContext) ABORT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserABORT_, 0)
+}
+
+func (s *KeywordContext) ACTION_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserACTION_, 0)
+}
+
+func (s *KeywordContext) ADD_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserADD_, 0)
+}
+
+func (s *KeywordContext) AFTER_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserAFTER_, 0)
+}
+
+func (s *KeywordContext) ALL_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserALL_, 0)
+}
+
+func (s *KeywordContext) ALTER_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserALTER_, 0)
+}
+
+func (s *KeywordContext) ANALYZE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserANALYZE_, 0)
+}
+
+func (s *KeywordContext) AND_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserAND_, 0)
+}
+
+func (s *KeywordContext) AS_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserAS_, 0)
+}
+
+func (s *KeywordContext) ASC_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserASC_, 0)
+}
+
+func (s *KeywordContext) ATTACH_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserATTACH_, 0)
+}
+
+func (s *KeywordContext) AUTOINCREMENT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserAUTOINCREMENT_, 0)
+}
+
+func (s *KeywordContext) BEFORE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserBEFORE_, 0)
+}
+
+func (s *KeywordContext) BEGIN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserBEGIN_, 0)
+}
+
+func (s *KeywordContext) BETWEEN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserBETWEEN_, 0)
+}
+
+func (s *KeywordContext) BY_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserBY_, 0)
+}
+
+func (s *KeywordContext) CASCADE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCASCADE_, 0)
+}
+
+func (s *KeywordContext) CASE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCASE_, 0)
+}
+
+func (s *KeywordContext) CAST_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCAST_, 0)
+}
+
+func (s *KeywordContext) CHECK_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCHECK_, 0)
+}
+
+func (s *KeywordContext) COLLATE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCOLLATE_, 0)
+}
+
+func (s *KeywordContext) COLUMN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCOLUMN_, 0)
+}
+
+func (s *KeywordContext) COMMIT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCOMMIT_, 0)
+}
+
+func (s *KeywordContext) CONFLICT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCONFLICT_, 0)
+}
+
+func (s *KeywordContext) CONSTRAINT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCONSTRAINT_, 0)
+}
+
+func (s *KeywordContext) CREATE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCREATE_, 0)
+}
+
+func (s *KeywordContext) CROSS_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCROSS_, 0)
+}
+
+func (s *KeywordContext) CURRENT_DATE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCURRENT_DATE_, 0)
+}
+
+func (s *KeywordContext) CURRENT_TIME_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCURRENT_TIME_, 0)
+}
+
+func (s *KeywordContext) CURRENT_TIMESTAMP_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCURRENT_TIMESTAMP_, 0)
+}
+
+func (s *KeywordContext) DATABASE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDATABASE_, 0)
+}
+
+func (s *KeywordContext) DEFAULT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDEFAULT_, 0)
+}
+
+func (s *KeywordContext) DEFERRABLE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDEFERRABLE_, 0)
+}
+
+func (s *KeywordContext) DEFERRED_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDEFERRED_, 0)
+}
+
+func (s *KeywordContext) DELETE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDELETE_, 0)
+}
+
+func (s *KeywordContext) DESC_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDESC_, 0)
+}
+
+func (s *KeywordContext) DETACH_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDETACH_, 0)
+}
+
+func (s *KeywordContext) DISTINCT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDISTINCT_, 0)
+}
+
+func (s *KeywordContext) DROP_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDROP_, 0)
+}
+
+func (s *KeywordContext) EACH_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserEACH_, 0)
+}
+
+func (s *KeywordContext) ELSE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserELSE_, 0)
+}
+
+func (s *KeywordContext) END_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserEND_, 0)
+}
+
+func (s *KeywordContext) ESCAPE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserESCAPE_, 0)
+}
+
+func (s *KeywordContext) EXCEPT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserEXCEPT_, 0)
+}
+
+func (s *KeywordContext) EXCLUSIVE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserEXCLUSIVE_, 0)
+}
+
+func (s *KeywordContext) EXISTS_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserEXISTS_, 0)
+}
+
+func (s *KeywordContext) EXPLAIN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserEXPLAIN_, 0)
+}
+
+func (s *KeywordContext) FAIL_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserFAIL_, 0)
+}
+
+func (s *KeywordContext) FOR_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserFOR_, 0)
+}
+
+func (s *KeywordContext) FOREIGN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserFOREIGN_, 0)
+}
+
+func (s *KeywordContext) FROM_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserFROM_, 0)
+}
+
+func (s *KeywordContext) FULL_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserFULL_, 0)
+}
+
+func (s *KeywordContext) GLOB_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserGLOB_, 0)
+}
+
+func (s *KeywordContext) GROUP_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserGROUP_, 0)
+}
+
+func (s *KeywordContext) HAVING_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserHAVING_, 0)
+}
+
+func (s *KeywordContext) IF_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserIF_, 0)
+}
+
+func (s *KeywordContext) IGNORE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserIGNORE_, 0)
+}
+
+func (s *KeywordContext) IMMEDIATE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserIMMEDIATE_, 0)
+}
+
+func (s *KeywordContext) IN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserIN_, 0)
+}
+
+func (s *KeywordContext) INDEX_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserINDEX_, 0)
+}
+
+func (s *KeywordContext) INDEXED_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserINDEXED_, 0)
+}
+
+func (s *KeywordContext) INITIALLY_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserINITIALLY_, 0)
+}
+
+func (s *KeywordContext) INNER_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserINNER_, 0)
+}
+
+func (s *KeywordContext) INSERT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserINSERT_, 0)
+}
+
+func (s *KeywordContext) INSTEAD_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserINSTEAD_, 0)
+}
+
+func (s *KeywordContext) INTERSECT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserINTERSECT_, 0)
+}
+
+func (s *KeywordContext) INTO_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserINTO_, 0)
+}
+
+func (s *KeywordContext) IS_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserIS_, 0)
+}
+
+func (s *KeywordContext) ISNULL_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserISNULL_, 0)
+}
+
+func (s *KeywordContext) JOIN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserJOIN_, 0)
+}
+
+func (s *KeywordContext) KEY_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserKEY_, 0)
+}
+
+func (s *KeywordContext) LEFT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserLEFT_, 0)
+}
+
+func (s *KeywordContext) LIKE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserLIKE_, 0)
+}
+
+func (s *KeywordContext) LIMIT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserLIMIT_, 0)
+}
+
+func (s *KeywordContext) MATCH_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserMATCH_, 0)
+}
+
+func (s *KeywordContext) NATURAL_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserNATURAL_, 0)
+}
+
+func (s *KeywordContext) NO_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserNO_, 0)
+}
+
+func (s *KeywordContext) NOT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserNOT_, 0)
+}
+
+func (s *KeywordContext) NOTNULL_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserNOTNULL_, 0)
+}
+
+func (s *KeywordContext) NULL_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserNULL_, 0)
+}
+
+func (s *KeywordContext) OF_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserOF_, 0)
+}
+
+func (s *KeywordContext) OFFSET_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserOFFSET_, 0)
+}
+
+func (s *KeywordContext) ON_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserON_, 0)
+}
+
+func (s *KeywordContext) OR_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserOR_, 0)
+}
+
+func (s *KeywordContext) ORDER_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserORDER_, 0)
+}
+
+func (s *KeywordContext) OUTER_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserOUTER_, 0)
+}
+
+func (s *KeywordContext) PLAN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserPLAN_, 0)
+}
+
+func (s *KeywordContext) PRAGMA_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserPRAGMA_, 0)
+}
+
+func (s *KeywordContext) PRIMARY_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserPRIMARY_, 0)
+}
+
+func (s *KeywordContext) QUERY_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserQUERY_, 0)
+}
+
+func (s *KeywordContext) RAISE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserRAISE_, 0)
+}
+
+func (s *KeywordContext) RECURSIVE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserRECURSIVE_, 0)
+}
+
+func (s *KeywordContext) REFERENCES_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserREFERENCES_, 0)
+}
+
+func (s *KeywordContext) REGEXP_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserREGEXP_, 0)
+}
+
+func (s *KeywordContext) REINDEX_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserREINDEX_, 0)
+}
+
+func (s *KeywordContext) RELEASE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserRELEASE_, 0)
+}
+
+func (s *KeywordContext) RENAME_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserRENAME_, 0)
+}
+
+func (s *KeywordContext) REPLACE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserREPLACE_, 0)
+}
+
+func (s *KeywordContext) RESTRICT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserRESTRICT_, 0)
+}
+
+func (s *KeywordContext) RIGHT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserRIGHT_, 0)
+}
+
+func (s *KeywordContext) ROLLBACK_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserROLLBACK_, 0)
+}
+
+func (s *KeywordContext) ROW_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserROW_, 0)
+}
+
+func (s *KeywordContext) ROWS_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserROWS_, 0)
+}
+
+func (s *KeywordContext) SAVEPOINT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserSAVEPOINT_, 0)
+}
+
+func (s *KeywordContext) SELECT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserSELECT_, 0)
+}
+
+func (s *KeywordContext) SET_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserSET_, 0)
+}
+
+func (s *KeywordContext) TABLE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTABLE_, 0)
+}
+
+func (s *KeywordContext) TEMP_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTEMP_, 0)
+}
+
+func (s *KeywordContext) TEMPORARY_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTEMPORARY_, 0)
+}
+
+func (s *KeywordContext) THEN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTHEN_, 0)
+}
+
+func (s *KeywordContext) TO_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTO_, 0)
+}
+
+func (s *KeywordContext) TRANSACTION_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTRANSACTION_, 0)
+}
+
+func (s *KeywordContext) TRIGGER_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTRIGGER_, 0)
+}
+
+func (s *KeywordContext) UNION_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserUNION_, 0)
+}
+
+func (s *KeywordContext) UNIQUE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserUNIQUE_, 0)
+}
+
+func (s *KeywordContext) UPDATE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserUPDATE_, 0)
+}
+
+func (s *KeywordContext) USING_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserUSING_, 0)
+}
+
+func (s *KeywordContext) VACUUM_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserVACUUM_, 0)
+}
+
+func (s *KeywordContext) VALUES_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserVALUES_, 0)
+}
+
+func (s *KeywordContext) VIEW_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserVIEW_, 0)
+}
+
+func (s *KeywordContext) VIRTUAL_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserVIRTUAL_, 0)
+}
+
+func (s *KeywordContext) WHEN_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserWHEN_, 0)
+}
+
+func (s *KeywordContext) WHERE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserWHERE_, 0)
+}
+
+func (s *KeywordContext) WITH_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserWITH_, 0)
+}
+
+func (s *KeywordContext) WITHOUT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserWITHOUT_, 0)
+}
+
+func (s *KeywordContext) FIRST_VALUE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserFIRST_VALUE_, 0)
+}
+
+func (s *KeywordContext) OVER_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserOVER_, 0)
+}
+
+func (s *KeywordContext) PARTITION_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserPARTITION_, 0)
+}
+
+func (s *KeywordContext) RANGE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserRANGE_, 0)
+}
+
+func (s *KeywordContext) PRECEDING_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserPRECEDING_, 0)
+}
+
+func (s *KeywordContext) UNBOUNDED_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserUNBOUNDED_, 0)
+}
+
+func (s *KeywordContext) CURRENT_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCURRENT_, 0)
+}
+
+func (s *KeywordContext) FOLLOWING_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserFOLLOWING_, 0)
+}
+
+func (s *KeywordContext) CUME_DIST_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserCUME_DIST_, 0)
+}
+
+func (s *KeywordContext) DENSE_RANK_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserDENSE_RANK_, 0)
+}
+
+func (s *KeywordContext) LAG_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserLAG_, 0)
+}
+
+func (s *KeywordContext) LAST_VALUE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserLAST_VALUE_, 0)
+}
+
+func (s *KeywordContext) LEAD_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserLEAD_, 0)
+}
+
+func (s *KeywordContext) NTH_VALUE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserNTH_VALUE_, 0)
+}
+
+func (s *KeywordContext) NTILE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserNTILE_, 0)
+}
+
+func (s *KeywordContext) PERCENT_RANK_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserPERCENT_RANK_, 0)
+}
+
+func (s *KeywordContext) RANK_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserRANK_, 0)
+}
+
+func (s *KeywordContext) ROW_NUMBER_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserROW_NUMBER_, 0)
+}
+
+func (s *KeywordContext) GENERATED_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserGENERATED_, 0)
+}
+
+func (s *KeywordContext) ALWAYS_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserALWAYS_, 0)
+}
+
+func (s *KeywordContext) STORED_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserSTORED_, 0)
+}
+
+func (s *KeywordContext) TRUE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserTRUE_, 0)
+}
+
+func (s *KeywordContext) FALSE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserFALSE_, 0)
+}
+
+func (s *KeywordContext) WINDOW_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserWINDOW_, 0)
+}
+
+func (s *KeywordContext) NULLS_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserNULLS_, 0)
+}
+
+func (s *KeywordContext) FIRST_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserFIRST_, 0)
+}
+
+func (s *KeywordContext) LAST_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserLAST_, 0)
+}
+
+func (s *KeywordContext) FILTER_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserFILTER_, 0)
+}
+
+func (s *KeywordContext) GROUPS_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserGROUPS_, 0)
+}
+
+func (s *KeywordContext) EXCLUDE_() antlr.TerminalNode {
+	return s.GetToken(SQLiteParserEXCLUDE_, 0)
+}
+
+func (s *KeywordContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *KeywordContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *KeywordContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterKeyword(s)
+	}
+}
+
+func (s *KeywordContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitKeyword(s)
+	}
+}
+
+func (p *SQLiteParser) Keyword() (localctx IKeywordContext) {
+	this := p
+	_ = this
+
+	localctx = NewKeywordContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 176, SQLiteParserRULE_keyword)
+	var _la int
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(1998)
+		_la = p.GetTokenStream().LA(1)
+
+		if !(((int64(_la) & ^0x3f) == 0 && ((int64(1)<<_la)&-33554432) != 0) || ((int64((_la-64)) & ^0x3f) == 0 && ((int64(1)<<(_la-64))&-1152921504606846977) != 0) || ((int64((_la-128)) & ^0x3f) == 0 && ((int64(1)<<(_la-128))&9007199254740991) != 0)) {
+			p.GetErrorHandler().RecoverInline(p)
+		} else {
+			p.GetErrorHandler().ReportMatch(p)
+			p.Consume()
+		}
+	}
+
+	return localctx
+}
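With roughly 150 alternatives, `keyword` is too wide for a single 64-bit mask, so the generated test in `Keyword()` splits the token-type space into 64-wide words: types 0-63 against -33554432 (every bit from 25 upward, so keyword tokens evidently start at type 25), and types 64-127 and 128-191 against the other two constants. A generalized sketch of that multi-word membership test (the function name and word layout are illustrative, not generated API):

```go
package main

import "fmt"

// inWideSet reports membership of tok in a token set wider than 64 types;
// words[i] holds the mask for token types [64*i, 64*i+63], mirroring the
// chained range tests in the generated Keyword() rule.
func inWideSet(tok int, words []int64) bool {
	w := tok >> 6 // which 64-bit word
	if tok < 0 || w >= len(words) {
		return false
	}
	return (int64(1)<<(tok&0x3f))&words[w] != 0
}

func main() {
	// The three constants from Keyword(), reinterpreted as a word array.
	words := []int64{-33554432, -1152921504606846977, 9007199254740991}
	fmt.Println(inWideSet(24, words))  // false: below the first keyword bit
	fmt.Println(inWideSet(25, words))  // true: first keyword token type
	fmt.Println(inWideSet(130, words)) // true: set bit in the third word
}
```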
+// INameContext is an interface to support dynamic dispatch.
+type INameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsNameContext differentiates from other interfaces.
+	IsNameContext()
+}
+
+type NameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyNameContext() *NameContext {
+	var p = new(NameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_name
+	return p
+}
+
+func (*NameContext) IsNameContext() {}
+
+func NewNameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *NameContext {
+	var p = new(NameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_name
+
+	return p
+}
+
+func (s *NameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *NameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *NameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *NameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *NameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterName(s)
+	}
+}
+
+func (s *NameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitName(s)
+	}
+}
+
+func (p *SQLiteParser) Name() (localctx INameContext) {
+	this := p
+	_ = this
+
+	localctx = NewNameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 178, SQLiteParserRULE_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2000)
+		p.Any_name()
+	}
+
+	return localctx
+}
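`name` and the dozen rules that follow (`function_name`, `schema_name`, `table_name`, and so on) all delegate to `any_name`; the grammar keeps them distinct purely so each identifier position gets its own context type and listener hooks. That makes positional extraction trivial with a listener. A sketch assuming the generated no-op base listener in this package is named `BaseSQLiteParserListener` (an assumption; only the `SQLiteParserListener` interface is visible in this diff):

```go
// tableNameCollector records every table name encountered during a walk.
// BaseSQLiteParserListener is assumed to be the generated no-op listener.
type tableNameCollector struct {
	BaseSQLiteParserListener
	names []string
}

// EnterTable_name fires only at table-name positions, even though the
// underlying any_name rule is shared with columns, indexes, and triggers.
func (l *tableNameCollector) EnterTable_name(ctx *Table_nameContext) {
	l.names = append(l.names, ctx.GetText())
}
```

Walking a parsed tree with `antlr.ParseTreeWalkerDefault.Walk(&tableNameCollector{}, tree)` would then collect the names in source order.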
+// IFunction_nameContext is an interface to support dynamic dispatch.
+type IFunction_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsFunction_nameContext differentiates from other interfaces.
+	IsFunction_nameContext()
+}
+
+type Function_nameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyFunction_nameContext() *Function_nameContext {
+	var p = new(Function_nameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_function_name
+	return p
+}
+
+func (*Function_nameContext) IsFunction_nameContext() {}
+
+func NewFunction_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Function_nameContext {
+	var p = new(Function_nameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_function_name
+
+	return p
+}
+
+func (s *Function_nameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Function_nameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *Function_nameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Function_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Function_nameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterFunction_name(s)
+	}
+}
+
+func (s *Function_nameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitFunction_name(s)
+	}
+}
+
+func (p *SQLiteParser) Function_name() (localctx IFunction_nameContext) {
+	this := p
+	_ = this
+
+	localctx = NewFunction_nameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 180, SQLiteParserRULE_function_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2002)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// ISchema_nameContext is an interface to support dynamic dispatch.
+type ISchema_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsSchema_nameContext differentiates from other interfaces.
+	IsSchema_nameContext()
+}
+
+type Schema_nameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptySchema_nameContext() *Schema_nameContext {
+	var p = new(Schema_nameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_schema_name
+	return p
+}
+
+func (*Schema_nameContext) IsSchema_nameContext() {}
+
+func NewSchema_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Schema_nameContext {
+	var p = new(Schema_nameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_schema_name
+
+	return p
+}
+
+func (s *Schema_nameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Schema_nameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *Schema_nameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Schema_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Schema_nameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterSchema_name(s)
+	}
+}
+
+func (s *Schema_nameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitSchema_name(s)
+	}
+}
+
+func (p *SQLiteParser) Schema_name() (localctx ISchema_nameContext) {
+	this := p
+	_ = this
+
+	localctx = NewSchema_nameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 182, SQLiteParserRULE_schema_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2004)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// ITable_nameContext is an interface to support dynamic dispatch.
+type ITable_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsTable_nameContext differentiates from other interfaces.
+	IsTable_nameContext()
+}
+
+type Table_nameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyTable_nameContext() *Table_nameContext {
+	var p = new(Table_nameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_table_name
+	return p
+}
+
+func (*Table_nameContext) IsTable_nameContext() {}
+
+func NewTable_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_nameContext {
+	var p = new(Table_nameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_table_name
+
+	return p
+}
+
+func (s *Table_nameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Table_nameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *Table_nameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Table_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Table_nameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterTable_name(s)
+	}
+}
+
+func (s *Table_nameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitTable_name(s)
+	}
+}
+
+func (p *SQLiteParser) Table_name() (localctx ITable_nameContext) {
+	this := p
+	_ = this
+
+	localctx = NewTable_nameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 184, SQLiteParserRULE_table_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2006)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// ITable_or_index_nameContext is an interface to support dynamic dispatch.
+type ITable_or_index_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsTable_or_index_nameContext differentiates from other interfaces.
+	IsTable_or_index_nameContext()
+}
+
+type Table_or_index_nameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyTable_or_index_nameContext() *Table_or_index_nameContext {
+	var p = new(Table_or_index_nameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_table_or_index_name
+	return p
+}
+
+func (*Table_or_index_nameContext) IsTable_or_index_nameContext() {}
+
+func NewTable_or_index_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_or_index_nameContext {
+	var p = new(Table_or_index_nameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_table_or_index_name
+
+	return p
+}
+
+func (s *Table_or_index_nameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Table_or_index_nameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *Table_or_index_nameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Table_or_index_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Table_or_index_nameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterTable_or_index_name(s)
+	}
+}
+
+func (s *Table_or_index_nameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitTable_or_index_name(s)
+	}
+}
+
+func (p *SQLiteParser) Table_or_index_name() (localctx ITable_or_index_nameContext) {
+	this := p
+	_ = this
+
+	localctx = NewTable_or_index_nameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 186, SQLiteParserRULE_table_or_index_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2008)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// IColumn_nameContext is an interface to support dynamic dispatch.
+type IColumn_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsColumn_nameContext differentiates from other interfaces.
+	IsColumn_nameContext()
+}
+
+type Column_nameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyColumn_nameContext() *Column_nameContext {
+	var p = new(Column_nameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_column_name
+	return p
+}
+
+func (*Column_nameContext) IsColumn_nameContext() {}
+
+func NewColumn_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Column_nameContext {
+	var p = new(Column_nameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_column_name
+
+	return p
+}
+
+func (s *Column_nameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Column_nameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *Column_nameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Column_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Column_nameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterColumn_name(s)
+	}
+}
+
+func (s *Column_nameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitColumn_name(s)
+	}
+}
+
+func (p *SQLiteParser) Column_name() (localctx IColumn_nameContext) {
+	this := p
+	_ = this
+
+	localctx = NewColumn_nameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 188, SQLiteParserRULE_column_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2010)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// ICollation_nameContext is an interface to support dynamic dispatch.
+type ICollation_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsCollation_nameContext differentiates from other interfaces.
+	IsCollation_nameContext()
+}
+
+type Collation_nameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyCollation_nameContext() *Collation_nameContext {
+	var p = new(Collation_nameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_collation_name
+	return p
+}
+
+func (*Collation_nameContext) IsCollation_nameContext() {}
+
+func NewCollation_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Collation_nameContext {
+	var p = new(Collation_nameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_collation_name
+
+	return p
+}
+
+func (s *Collation_nameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Collation_nameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *Collation_nameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Collation_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Collation_nameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterCollation_name(s)
+	}
+}
+
+func (s *Collation_nameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitCollation_name(s)
+	}
+}
+
+func (p *SQLiteParser) Collation_name() (localctx ICollation_nameContext) {
+	this := p
+	_ = this
+
+	localctx = NewCollation_nameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 190, SQLiteParserRULE_collation_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2012)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// IForeign_tableContext is an interface to support dynamic dispatch.
+type IForeign_tableContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsForeign_tableContext differentiates from other interfaces.
+	IsForeign_tableContext()
+}
+
+type Foreign_tableContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyForeign_tableContext() *Foreign_tableContext {
+	var p = new(Foreign_tableContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_foreign_table
+	return p
+}
+
+func (*Foreign_tableContext) IsForeign_tableContext() {}
+
+func NewForeign_tableContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Foreign_tableContext {
+	var p = new(Foreign_tableContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_foreign_table
+
+	return p
+}
+
+func (s *Foreign_tableContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Foreign_tableContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *Foreign_tableContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Foreign_tableContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Foreign_tableContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterForeign_table(s)
+	}
+}
+
+func (s *Foreign_tableContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitForeign_table(s)
+	}
+}
+
+func (p *SQLiteParser) Foreign_table() (localctx IForeign_tableContext) {
+	this := p
+	_ = this
+
+	localctx = NewForeign_tableContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 192, SQLiteParserRULE_foreign_table)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2014)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// IIndex_nameContext is an interface to support dynamic dispatch.
+type IIndex_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsIndex_nameContext differentiates from other interfaces.
+	IsIndex_nameContext()
+}
+
+type Index_nameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyIndex_nameContext() *Index_nameContext {
+	var p = new(Index_nameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_index_name
+	return p
+}
+
+func (*Index_nameContext) IsIndex_nameContext() {}
+
+func NewIndex_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Index_nameContext {
+	var p = new(Index_nameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_index_name
+
+	return p
+}
+
+func (s *Index_nameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Index_nameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *Index_nameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Index_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Index_nameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterIndex_name(s)
+	}
+}
+
+func (s *Index_nameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitIndex_name(s)
+	}
+}
+
+func (p *SQLiteParser) Index_name() (localctx IIndex_nameContext) {
+	this := p
+	_ = this
+
+	localctx = NewIndex_nameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 194, SQLiteParserRULE_index_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2016)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// ITrigger_nameContext is an interface to support dynamic dispatch.
+type ITrigger_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsTrigger_nameContext differentiates from other interfaces.
+	IsTrigger_nameContext()
+}
+
+type Trigger_nameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyTrigger_nameContext() *Trigger_nameContext {
+	var p = new(Trigger_nameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_trigger_name
+	return p
+}
+
+func (*Trigger_nameContext) IsTrigger_nameContext() {}
+
+func NewTrigger_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Trigger_nameContext {
+	var p = new(Trigger_nameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_trigger_name
+
+	return p
+}
+
+func (s *Trigger_nameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Trigger_nameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *Trigger_nameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Trigger_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Trigger_nameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterTrigger_name(s)
+	}
+}
+
+func (s *Trigger_nameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitTrigger_name(s)
+	}
+}
+
+func (p *SQLiteParser) Trigger_name() (localctx ITrigger_nameContext) {
+	this := p
+	_ = this
+
+	localctx = NewTrigger_nameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 196, SQLiteParserRULE_trigger_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2018)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// IView_nameContext is an interface to support dynamic dispatch.
+type IView_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsView_nameContext differentiates from other interfaces.
+	IsView_nameContext()
+}
+
+type View_nameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyView_nameContext() *View_nameContext {
+	var p = new(View_nameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_view_name
+	return p
+}
+
+func (*View_nameContext) IsView_nameContext() {}
+
+func NewView_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *View_nameContext {
+	var p = new(View_nameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_view_name
+
+	return p
+}
+
+func (s *View_nameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *View_nameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *View_nameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *View_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *View_nameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterView_name(s)
+	}
+}
+
+func (s *View_nameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitView_name(s)
+	}
+}
+
+func (p *SQLiteParser) View_name() (localctx IView_nameContext) {
+	this := p
+	_ = this
+
+	localctx = NewView_nameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 198, SQLiteParserRULE_view_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2020)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// IModule_nameContext is an interface to support dynamic dispatch.
+type IModule_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsModule_nameContext differentiates from other interfaces.
+	IsModule_nameContext()
+}
+
+type Module_nameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyModule_nameContext() *Module_nameContext {
+	var p = new(Module_nameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_module_name
+	return p
+}
+
+func (*Module_nameContext) IsModule_nameContext() {}
+
+func NewModule_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Module_nameContext {
+	var p = new(Module_nameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_module_name
+
+	return p
+}
+
+func (s *Module_nameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Module_nameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *Module_nameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Module_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Module_nameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterModule_name(s)
+	}
+}
+
+func (s *Module_nameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitModule_name(s)
+	}
+}
+
+func (p *SQLiteParser) Module_name() (localctx IModule_nameContext) {
+	this := p
+	_ = this
+
+	localctx = NewModule_nameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 200, SQLiteParserRULE_module_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2022)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// IPragma_nameContext is an interface to support dynamic dispatch.
+type IPragma_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsPragma_nameContext differentiates from other interfaces.
+	IsPragma_nameContext()
+}
+
+type Pragma_nameContext struct {
+	*antlr.BaseParserRuleContext
+	parser antlr.Parser
+}
+
+func NewEmptyPragma_nameContext() *Pragma_nameContext {
+	var p = new(Pragma_nameContext)
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+	p.RuleIndex = SQLiteParserRULE_pragma_name
+	return p
+}
+
+func (*Pragma_nameContext) IsPragma_nameContext() {}
+
+func NewPragma_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Pragma_nameContext {
+	var p = new(Pragma_nameContext)
+
+	p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+	p.parser = parser
+	p.RuleIndex = SQLiteParserRULE_pragma_name
+
+	return p
+}
+
+func (s *Pragma_nameContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *Pragma_nameContext) Any_name() IAny_nameContext {
+	var t antlr.RuleContext
+	for _, ctx := range s.GetChildren() {
+		if _, ok := ctx.(IAny_nameContext); ok {
+			t = ctx.(antlr.RuleContext)
+			break
+		}
+	}
+
+	if t == nil {
+		return nil
+	}
+
+	return t.(IAny_nameContext)
+}
+
+func (s *Pragma_nameContext) GetRuleContext() antlr.RuleContext {
+	return s
+}
+
+func (s *Pragma_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+	return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *Pragma_nameContext) EnterRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.EnterPragma_name(s)
+	}
+}
+
+func (s *Pragma_nameContext) ExitRule(listener antlr.ParseTreeListener) {
+	if listenerT, ok := listener.(SQLiteParserListener); ok {
+		listenerT.ExitPragma_name(s)
+	}
+}
+
+func (p *SQLiteParser) Pragma_name() (localctx IPragma_nameContext) {
+	this := p
+	_ = this
+
+	localctx = NewPragma_nameContext(p, p.GetParserRuleContext(), p.GetState())
+	p.EnterRule(localctx, 202, SQLiteParserRULE_pragma_name)
+
+	defer func() {
+		p.ExitRule()
+	}()
+
+	defer func() {
+		if err := recover(); err != nil {
+			if v, ok := err.(antlr.RecognitionException); ok {
+				localctx.SetException(v)
+				p.GetErrorHandler().ReportError(p, v)
+				p.GetErrorHandler().Recover(p, v)
+			} else {
+				panic(err)
+			}
+		}
+	}()
+
+	p.EnterOuterAlt(localctx, 1)
+	{
+		p.SetState(2024)
+		p.Any_name()
+	}
+
+	return localctx
+}
+
+// ISavepoint_nameContext is an interface to support dynamic dispatch.
+type ISavepoint_nameContext interface {
+	antlr.ParserRuleContext
+
+	// GetParser returns the parser.
+	GetParser() antlr.Parser
+
+	// Getter signatures
+	Any_name() IAny_nameContext
+
+	// IsSavepoint_nameContext differentiates from other interfaces.
+ IsSavepoint_nameContext() +} + +type Savepoint_nameContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptySavepoint_nameContext() *Savepoint_nameContext { + var p = new(Savepoint_nameContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_savepoint_name + return p +} + +func (*Savepoint_nameContext) IsSavepoint_nameContext() {} + +func NewSavepoint_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Savepoint_nameContext { + var p = new(Savepoint_nameContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_savepoint_name + + return p +} + +func (s *Savepoint_nameContext) GetParser() antlr.Parser { return s.parser } + +func (s *Savepoint_nameContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *Savepoint_nameContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Savepoint_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Savepoint_nameContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterSavepoint_name(s) + } +} + +func (s *Savepoint_nameContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitSavepoint_name(s) + } +} + +func (p *SQLiteParser) Savepoint_name() (localctx ISavepoint_nameContext) { + this := p + _ = this + + localctx = NewSavepoint_nameContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 204, SQLiteParserRULE_savepoint_name) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(2026) + p.Any_name() + } + + return localctx +} + +// ITable_aliasContext is an interface to support dynamic dispatch. +type ITable_aliasContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Any_name() IAny_nameContext + + // IsTable_aliasContext differentiates from other interfaces. 
+ IsTable_aliasContext() +} + +type Table_aliasContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyTable_aliasContext() *Table_aliasContext { + var p = new(Table_aliasContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_table_alias + return p +} + +func (*Table_aliasContext) IsTable_aliasContext() {} + +func NewTable_aliasContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_aliasContext { + var p = new(Table_aliasContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_table_alias + + return p +} + +func (s *Table_aliasContext) GetParser() antlr.Parser { return s.parser } + +func (s *Table_aliasContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *Table_aliasContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Table_aliasContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Table_aliasContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterTable_alias(s) + } +} + +func (s *Table_aliasContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitTable_alias(s) + } +} + +func (p *SQLiteParser) Table_alias() (localctx ITable_aliasContext) { + this := p + _ = this + + localctx = NewTable_aliasContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 206, SQLiteParserRULE_table_alias) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(2028) + p.Any_name() + } + + return localctx +} + +// ITransaction_nameContext is an interface to support dynamic dispatch. +type ITransaction_nameContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Any_name() IAny_nameContext + + // IsTransaction_nameContext differentiates from other interfaces. 
+ IsTransaction_nameContext() +} + +type Transaction_nameContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyTransaction_nameContext() *Transaction_nameContext { + var p = new(Transaction_nameContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_transaction_name + return p +} + +func (*Transaction_nameContext) IsTransaction_nameContext() {} + +func NewTransaction_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Transaction_nameContext { + var p = new(Transaction_nameContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_transaction_name + + return p +} + +func (s *Transaction_nameContext) GetParser() antlr.Parser { return s.parser } + +func (s *Transaction_nameContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *Transaction_nameContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Transaction_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Transaction_nameContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterTransaction_name(s) + } +} + +func (s *Transaction_nameContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitTransaction_name(s) + } +} + +func (p *SQLiteParser) Transaction_name() (localctx ITransaction_nameContext) { + this := p + _ = this + + localctx = NewTransaction_nameContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 208, SQLiteParserRULE_transaction_name) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(2030) + p.Any_name() + } + + return localctx +} + +// IWindow_nameContext is an interface to support dynamic dispatch. +type IWindow_nameContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Any_name() IAny_nameContext + + // IsWindow_nameContext differentiates from other interfaces. 
+ IsWindow_nameContext() +} + +type Window_nameContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyWindow_nameContext() *Window_nameContext { + var p = new(Window_nameContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_window_name + return p +} + +func (*Window_nameContext) IsWindow_nameContext() {} + +func NewWindow_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Window_nameContext { + var p = new(Window_nameContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_window_name + + return p +} + +func (s *Window_nameContext) GetParser() antlr.Parser { return s.parser } + +func (s *Window_nameContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *Window_nameContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Window_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Window_nameContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterWindow_name(s) + } +} + +func (s *Window_nameContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitWindow_name(s) + } +} + +func (p *SQLiteParser) Window_name() (localctx IWindow_nameContext) { + this := p + _ = this + + localctx = NewWindow_nameContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 210, SQLiteParserRULE_window_name) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(2032) + p.Any_name() + } + + return localctx +} + +// IAliasContext is an interface to support dynamic dispatch. +type IAliasContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Any_name() IAny_nameContext + + // IsAliasContext differentiates from other interfaces. 
+ IsAliasContext() +} + +type AliasContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyAliasContext() *AliasContext { + var p = new(AliasContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_alias + return p +} + +func (*AliasContext) IsAliasContext() {} + +func NewAliasContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *AliasContext { + var p = new(AliasContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_alias + + return p +} + +func (s *AliasContext) GetParser() antlr.Parser { return s.parser } + +func (s *AliasContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *AliasContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *AliasContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *AliasContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterAlias(s) + } +} + +func (s *AliasContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitAlias(s) + } +} + +func (p *SQLiteParser) Alias() (localctx IAliasContext) { + this := p + _ = this + + localctx = NewAliasContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 212, SQLiteParserRULE_alias) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(2034) + p.Any_name() + } + + return localctx +} + +// IFilenameContext is an interface to support dynamic dispatch. +type IFilenameContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Any_name() IAny_nameContext + + // IsFilenameContext differentiates from other interfaces. 
+ IsFilenameContext() +} + +type FilenameContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyFilenameContext() *FilenameContext { + var p = new(FilenameContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_filename + return p +} + +func (*FilenameContext) IsFilenameContext() {} + +func NewFilenameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *FilenameContext { + var p = new(FilenameContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_filename + + return p +} + +func (s *FilenameContext) GetParser() antlr.Parser { return s.parser } + +func (s *FilenameContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *FilenameContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *FilenameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *FilenameContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterFilename(s) + } +} + +func (s *FilenameContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitFilename(s) + } +} + +func (p *SQLiteParser) Filename() (localctx IFilenameContext) { + this := p + _ = this + + localctx = NewFilenameContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 214, SQLiteParserRULE_filename) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(2036) + p.Any_name() + } + + return localctx +} + +// IBase_window_nameContext is an interface to support dynamic dispatch. +type IBase_window_nameContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Any_name() IAny_nameContext + + // IsBase_window_nameContext differentiates from other interfaces. 
+ IsBase_window_nameContext() +} + +type Base_window_nameContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyBase_window_nameContext() *Base_window_nameContext { + var p = new(Base_window_nameContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_base_window_name + return p +} + +func (*Base_window_nameContext) IsBase_window_nameContext() {} + +func NewBase_window_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Base_window_nameContext { + var p = new(Base_window_nameContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_base_window_name + + return p +} + +func (s *Base_window_nameContext) GetParser() antlr.Parser { return s.parser } + +func (s *Base_window_nameContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *Base_window_nameContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Base_window_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Base_window_nameContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterBase_window_name(s) + } +} + +func (s *Base_window_nameContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitBase_window_name(s) + } +} + +func (p *SQLiteParser) Base_window_name() (localctx IBase_window_nameContext) { + this := p + _ = this + + localctx = NewBase_window_nameContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 216, SQLiteParserRULE_base_window_name) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(2038) + p.Any_name() + } + + return localctx +} + +// ISimple_funcContext is an interface to support dynamic dispatch. +type ISimple_funcContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Any_name() IAny_nameContext + + // IsSimple_funcContext differentiates from other interfaces. 
+ IsSimple_funcContext() +} + +type Simple_funcContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptySimple_funcContext() *Simple_funcContext { + var p = new(Simple_funcContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_simple_func + return p +} + +func (*Simple_funcContext) IsSimple_funcContext() {} + +func NewSimple_funcContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Simple_funcContext { + var p = new(Simple_funcContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_simple_func + + return p +} + +func (s *Simple_funcContext) GetParser() antlr.Parser { return s.parser } + +func (s *Simple_funcContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *Simple_funcContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Simple_funcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Simple_funcContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterSimple_func(s) + } +} + +func (s *Simple_funcContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitSimple_func(s) + } +} + +func (p *SQLiteParser) Simple_func() (localctx ISimple_funcContext) { + this := p + _ = this + + localctx = NewSimple_funcContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 218, SQLiteParserRULE_simple_func) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(2040) + p.Any_name() + } + + return localctx +} + +// IAggregate_funcContext is an interface to support dynamic dispatch. +type IAggregate_funcContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Any_name() IAny_nameContext + + // IsAggregate_funcContext differentiates from other interfaces. 
+ IsAggregate_funcContext() +} + +type Aggregate_funcContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyAggregate_funcContext() *Aggregate_funcContext { + var p = new(Aggregate_funcContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_aggregate_func + return p +} + +func (*Aggregate_funcContext) IsAggregate_funcContext() {} + +func NewAggregate_funcContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Aggregate_funcContext { + var p = new(Aggregate_funcContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_aggregate_func + + return p +} + +func (s *Aggregate_funcContext) GetParser() antlr.Parser { return s.parser } + +func (s *Aggregate_funcContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *Aggregate_funcContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Aggregate_funcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Aggregate_funcContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterAggregate_func(s) + } +} + +func (s *Aggregate_funcContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitAggregate_func(s) + } +} + +func (p *SQLiteParser) Aggregate_func() (localctx IAggregate_funcContext) { + this := p + _ = this + + localctx = NewAggregate_funcContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 220, SQLiteParserRULE_aggregate_func) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(2042) + p.Any_name() + } + + return localctx +} + +// ITable_function_nameContext is an interface to support dynamic dispatch. +type ITable_function_nameContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + Any_name() IAny_nameContext + + // IsTable_function_nameContext differentiates from other interfaces. 
+ IsTable_function_nameContext() +} + +type Table_function_nameContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyTable_function_nameContext() *Table_function_nameContext { + var p = new(Table_function_nameContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_table_function_name + return p +} + +func (*Table_function_nameContext) IsTable_function_nameContext() {} + +func NewTable_function_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Table_function_nameContext { + var p = new(Table_function_nameContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_table_function_name + + return p +} + +func (s *Table_function_nameContext) GetParser() antlr.Parser { return s.parser } + +func (s *Table_function_nameContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *Table_function_nameContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Table_function_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Table_function_nameContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterTable_function_name(s) + } +} + +func (s *Table_function_nameContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitTable_function_name(s) + } +} + +func (p *SQLiteParser) Table_function_name() (localctx ITable_function_nameContext) { + this := p + _ = this + + localctx = NewTable_function_nameContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 222, SQLiteParserRULE_table_function_name) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(2044) + p.Any_name() + } + + return localctx +} + +// IAny_nameContext is an interface to support dynamic dispatch. +type IAny_nameContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Getter signatures + IDENTIFIER() antlr.TerminalNode + Keyword() IKeywordContext + STRING_LITERAL() antlr.TerminalNode + OPEN_PAR() antlr.TerminalNode + Any_name() IAny_nameContext + CLOSE_PAR() antlr.TerminalNode + + // IsAny_nameContext differentiates from other interfaces. 
+ IsAny_nameContext() +} + +type Any_nameContext struct { + *antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyAny_nameContext() *Any_nameContext { + var p = new(Any_nameContext) + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1) + p.RuleIndex = SQLiteParserRULE_any_name + return p +} + +func (*Any_nameContext) IsAny_nameContext() {} + +func NewAny_nameContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *Any_nameContext { + var p = new(Any_nameContext) + + p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState) + + p.parser = parser + p.RuleIndex = SQLiteParserRULE_any_name + + return p +} + +func (s *Any_nameContext) GetParser() antlr.Parser { return s.parser } + +func (s *Any_nameContext) IDENTIFIER() antlr.TerminalNode { + return s.GetToken(SQLiteParserIDENTIFIER, 0) +} + +func (s *Any_nameContext) Keyword() IKeywordContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IKeywordContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IKeywordContext) +} + +func (s *Any_nameContext) STRING_LITERAL() antlr.TerminalNode { + return s.GetToken(SQLiteParserSTRING_LITERAL, 0) +} + +func (s *Any_nameContext) OPEN_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserOPEN_PAR, 0) +} + +func (s *Any_nameContext) Any_name() IAny_nameContext { + var t antlr.RuleContext + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IAny_nameContext); ok { + t = ctx.(antlr.RuleContext) + break + } + } + + if t == nil { + return nil + } + + return t.(IAny_nameContext) +} + +func (s *Any_nameContext) CLOSE_PAR() antlr.TerminalNode { + return s.GetToken(SQLiteParserCLOSE_PAR, 0) +} + +func (s *Any_nameContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *Any_nameContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + +func (s *Any_nameContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.EnterAny_name(s) + } +} + +func (s *Any_nameContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(SQLiteParserListener); ok { + listenerT.ExitAny_name(s) + } +} + +func (p *SQLiteParser) Any_name() (localctx IAny_nameContext) { + this := p + _ = this + + localctx = NewAny_nameContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 224, SQLiteParserRULE_any_name) + + defer func() { + p.ExitRule() + }() + + defer func() { + if err := recover(); err != nil { + if v, ok := err.(antlr.RecognitionException); ok { + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + } else { + panic(err) + } + } + }() + + p.SetState(2053) + p.GetErrorHandler().Sync(p) + + switch p.GetTokenStream().LA(1) { + case SQLiteParserIDENTIFIER: + p.EnterOuterAlt(localctx, 1) + { + p.SetState(2046) + p.Match(SQLiteParserIDENTIFIER) + } + + case SQLiteParserABORT_, SQLiteParserACTION_, SQLiteParserADD_, SQLiteParserAFTER_, SQLiteParserALL_, SQLiteParserALTER_, SQLiteParserANALYZE_, SQLiteParserAND_, SQLiteParserAS_, SQLiteParserASC_, SQLiteParserATTACH_, SQLiteParserAUTOINCREMENT_, SQLiteParserBEFORE_, SQLiteParserBEGIN_, SQLiteParserBETWEEN_, SQLiteParserBY_, SQLiteParserCASCADE_, SQLiteParserCASE_, SQLiteParserCAST_, SQLiteParserCHECK_, SQLiteParserCOLLATE_, SQLiteParserCOLUMN_, SQLiteParserCOMMIT_, 
SQLiteParserCONFLICT_, SQLiteParserCONSTRAINT_, SQLiteParserCREATE_, SQLiteParserCROSS_, SQLiteParserCURRENT_DATE_, SQLiteParserCURRENT_TIME_, SQLiteParserCURRENT_TIMESTAMP_, SQLiteParserDATABASE_, SQLiteParserDEFAULT_, SQLiteParserDEFERRABLE_, SQLiteParserDEFERRED_, SQLiteParserDELETE_, SQLiteParserDESC_, SQLiteParserDETACH_, SQLiteParserDISTINCT_, SQLiteParserDROP_, SQLiteParserEACH_, SQLiteParserELSE_, SQLiteParserEND_, SQLiteParserESCAPE_, SQLiteParserEXCEPT_, SQLiteParserEXCLUSIVE_, SQLiteParserEXISTS_, SQLiteParserEXPLAIN_, SQLiteParserFAIL_, SQLiteParserFOR_, SQLiteParserFOREIGN_, SQLiteParserFROM_, SQLiteParserFULL_, SQLiteParserGLOB_, SQLiteParserGROUP_, SQLiteParserHAVING_, SQLiteParserIF_, SQLiteParserIGNORE_, SQLiteParserIMMEDIATE_, SQLiteParserIN_, SQLiteParserINDEX_, SQLiteParserINDEXED_, SQLiteParserINITIALLY_, SQLiteParserINNER_, SQLiteParserINSERT_, SQLiteParserINSTEAD_, SQLiteParserINTERSECT_, SQLiteParserINTO_, SQLiteParserIS_, SQLiteParserISNULL_, SQLiteParserJOIN_, SQLiteParserKEY_, SQLiteParserLEFT_, SQLiteParserLIKE_, SQLiteParserLIMIT_, SQLiteParserMATCH_, SQLiteParserNATURAL_, SQLiteParserNO_, SQLiteParserNOT_, SQLiteParserNOTNULL_, SQLiteParserNULL_, SQLiteParserOF_, SQLiteParserOFFSET_, SQLiteParserON_, SQLiteParserOR_, SQLiteParserORDER_, SQLiteParserOUTER_, SQLiteParserPLAN_, SQLiteParserPRAGMA_, SQLiteParserPRIMARY_, SQLiteParserQUERY_, SQLiteParserRAISE_, SQLiteParserRECURSIVE_, SQLiteParserREFERENCES_, SQLiteParserREGEXP_, SQLiteParserREINDEX_, SQLiteParserRELEASE_, SQLiteParserRENAME_, SQLiteParserREPLACE_, SQLiteParserRESTRICT_, SQLiteParserRIGHT_, SQLiteParserROLLBACK_, SQLiteParserROW_, SQLiteParserROWS_, SQLiteParserSAVEPOINT_, SQLiteParserSELECT_, SQLiteParserSET_, SQLiteParserTABLE_, SQLiteParserTEMP_, SQLiteParserTEMPORARY_, SQLiteParserTHEN_, SQLiteParserTO_, SQLiteParserTRANSACTION_, SQLiteParserTRIGGER_, SQLiteParserUNION_, SQLiteParserUNIQUE_, SQLiteParserUPDATE_, SQLiteParserUSING_, SQLiteParserVACUUM_, SQLiteParserVALUES_, SQLiteParserVIEW_, SQLiteParserVIRTUAL_, SQLiteParserWHEN_, SQLiteParserWHERE_, SQLiteParserWITH_, SQLiteParserWITHOUT_, SQLiteParserFIRST_VALUE_, SQLiteParserOVER_, SQLiteParserPARTITION_, SQLiteParserRANGE_, SQLiteParserPRECEDING_, SQLiteParserUNBOUNDED_, SQLiteParserCURRENT_, SQLiteParserFOLLOWING_, SQLiteParserCUME_DIST_, SQLiteParserDENSE_RANK_, SQLiteParserLAG_, SQLiteParserLAST_VALUE_, SQLiteParserLEAD_, SQLiteParserNTH_VALUE_, SQLiteParserNTILE_, SQLiteParserPERCENT_RANK_, SQLiteParserRANK_, SQLiteParserROW_NUMBER_, SQLiteParserGENERATED_, SQLiteParserALWAYS_, SQLiteParserSTORED_, SQLiteParserTRUE_, SQLiteParserFALSE_, SQLiteParserWINDOW_, SQLiteParserNULLS_, SQLiteParserFIRST_, SQLiteParserLAST_, SQLiteParserFILTER_, SQLiteParserGROUPS_, SQLiteParserEXCLUDE_: + p.EnterOuterAlt(localctx, 2) + { + p.SetState(2047) + p.Keyword() + } + + case SQLiteParserSTRING_LITERAL: + p.EnterOuterAlt(localctx, 3) + { + p.SetState(2048) + p.Match(SQLiteParserSTRING_LITERAL) + } + + case SQLiteParserOPEN_PAR: + p.EnterOuterAlt(localctx, 4) + { + p.SetState(2049) + p.Match(SQLiteParserOPEN_PAR) + } + { + p.SetState(2050) + p.Any_name() + } + { + p.SetState(2051) + p.Match(SQLiteParserCLOSE_PAR) + } + + default: + panic(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + } + + return localctx +} + +func (p *SQLiteParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool { + switch ruleIndex { + case 32: + var t *ExprContext = nil + if localctx != nil { + t = localctx.(*ExprContext) + } + return p.Expr_Sempred(t, 
predIndex) + + default: + panic("No predicate with index: " + fmt.Sprint(ruleIndex)) + } +} + +func (p *SQLiteParser) Expr_Sempred(localctx antlr.RuleContext, predIndex int) bool { + this := p + _ = this + + switch predIndex { + case 0: + return p.Precpred(p.GetParserRuleContext(), 20) + + case 1: + return p.Precpred(p.GetParserRuleContext(), 19) + + case 2: + return p.Precpred(p.GetParserRuleContext(), 18) + + case 3: + return p.Precpred(p.GetParserRuleContext(), 17) + + case 4: + return p.Precpred(p.GetParserRuleContext(), 16) + + case 5: + return p.Precpred(p.GetParserRuleContext(), 15) + + case 6: + return p.Precpred(p.GetParserRuleContext(), 14) + + case 7: + return p.Precpred(p.GetParserRuleContext(), 13) + + case 8: + return p.Precpred(p.GetParserRuleContext(), 6) + + case 9: + return p.Precpred(p.GetParserRuleContext(), 5) + + case 10: + return p.Precpred(p.GetParserRuleContext(), 9) + + case 11: + return p.Precpred(p.GetParserRuleContext(), 8) + + case 12: + return p.Precpred(p.GetParserRuleContext(), 7) + + case 13: + return p.Precpred(p.GetParserRuleContext(), 4) + + default: + panic("No predicate with index: " + fmt.Sprint(predIndex)) + } +} diff --git a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqliteparser_base_listener.go b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqliteparser_base_listener.go index 7ba8689713..bb23712c76 100644 --- a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqliteparser_base_listener.go +++ b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqliteparser_base_listener.go @@ -1,708 +1,708 @@ -// Code generated from SQLiteParser.g4 by ANTLR 4.12.0. DO NOT EDIT. - -package sqliteparser // SQLiteParser -import "github.com/antlr/antlr4/runtime/Go/antlr/v4" - -// BaseSQLiteParserListener is a complete listener for a parse tree produced by SQLiteParser. -type BaseSQLiteParserListener struct{} - -var _ SQLiteParserListener = &BaseSQLiteParserListener{} - -// VisitTerminal is called when a terminal node is visited. -func (s *BaseSQLiteParserListener) VisitTerminal(node antlr.TerminalNode) {} - -// VisitErrorNode is called when an error node is visited. -func (s *BaseSQLiteParserListener) VisitErrorNode(node antlr.ErrorNode) {} - -// EnterEveryRule is called when any rule is entered. -func (s *BaseSQLiteParserListener) EnterEveryRule(ctx antlr.ParserRuleContext) {} - -// ExitEveryRule is called when any rule is exited. -func (s *BaseSQLiteParserListener) ExitEveryRule(ctx antlr.ParserRuleContext) {} - -// EnterParse is called when production parse is entered. -func (s *BaseSQLiteParserListener) EnterParse(ctx *ParseContext) {} - -// ExitParse is called when production parse is exited. -func (s *BaseSQLiteParserListener) ExitParse(ctx *ParseContext) {} - -// EnterSql_stmt_list is called when production sql_stmt_list is entered. -func (s *BaseSQLiteParserListener) EnterSql_stmt_list(ctx *Sql_stmt_listContext) {} - -// ExitSql_stmt_list is called when production sql_stmt_list is exited. -func (s *BaseSQLiteParserListener) ExitSql_stmt_list(ctx *Sql_stmt_listContext) {} - -// EnterSql_stmt is called when production sql_stmt is entered. -func (s *BaseSQLiteParserListener) EnterSql_stmt(ctx *Sql_stmtContext) {} - -// ExitSql_stmt is called when production sql_stmt is exited. -func (s *BaseSQLiteParserListener) ExitSql_stmt(ctx *Sql_stmtContext) {} - -// EnterAlter_table_stmt is called when production alter_table_stmt is entered. 
-func (s *BaseSQLiteParserListener) EnterAlter_table_stmt(ctx *Alter_table_stmtContext) {} - -// ExitAlter_table_stmt is called when production alter_table_stmt is exited. -func (s *BaseSQLiteParserListener) ExitAlter_table_stmt(ctx *Alter_table_stmtContext) {} - -// EnterAnalyze_stmt is called when production analyze_stmt is entered. -func (s *BaseSQLiteParserListener) EnterAnalyze_stmt(ctx *Analyze_stmtContext) {} - -// ExitAnalyze_stmt is called when production analyze_stmt is exited. -func (s *BaseSQLiteParserListener) ExitAnalyze_stmt(ctx *Analyze_stmtContext) {} - -// EnterAttach_stmt is called when production attach_stmt is entered. -func (s *BaseSQLiteParserListener) EnterAttach_stmt(ctx *Attach_stmtContext) {} - -// ExitAttach_stmt is called when production attach_stmt is exited. -func (s *BaseSQLiteParserListener) ExitAttach_stmt(ctx *Attach_stmtContext) {} - -// EnterBegin_stmt is called when production begin_stmt is entered. -func (s *BaseSQLiteParserListener) EnterBegin_stmt(ctx *Begin_stmtContext) {} - -// ExitBegin_stmt is called when production begin_stmt is exited. -func (s *BaseSQLiteParserListener) ExitBegin_stmt(ctx *Begin_stmtContext) {} - -// EnterCommit_stmt is called when production commit_stmt is entered. -func (s *BaseSQLiteParserListener) EnterCommit_stmt(ctx *Commit_stmtContext) {} - -// ExitCommit_stmt is called when production commit_stmt is exited. -func (s *BaseSQLiteParserListener) ExitCommit_stmt(ctx *Commit_stmtContext) {} - -// EnterRollback_stmt is called when production rollback_stmt is entered. -func (s *BaseSQLiteParserListener) EnterRollback_stmt(ctx *Rollback_stmtContext) {} - -// ExitRollback_stmt is called when production rollback_stmt is exited. -func (s *BaseSQLiteParserListener) ExitRollback_stmt(ctx *Rollback_stmtContext) {} - -// EnterSavepoint_stmt is called when production savepoint_stmt is entered. -func (s *BaseSQLiteParserListener) EnterSavepoint_stmt(ctx *Savepoint_stmtContext) {} - -// ExitSavepoint_stmt is called when production savepoint_stmt is exited. -func (s *BaseSQLiteParserListener) ExitSavepoint_stmt(ctx *Savepoint_stmtContext) {} - -// EnterRelease_stmt is called when production release_stmt is entered. -func (s *BaseSQLiteParserListener) EnterRelease_stmt(ctx *Release_stmtContext) {} - -// ExitRelease_stmt is called when production release_stmt is exited. -func (s *BaseSQLiteParserListener) ExitRelease_stmt(ctx *Release_stmtContext) {} - -// EnterCreate_index_stmt is called when production create_index_stmt is entered. -func (s *BaseSQLiteParserListener) EnterCreate_index_stmt(ctx *Create_index_stmtContext) {} - -// ExitCreate_index_stmt is called when production create_index_stmt is exited. -func (s *BaseSQLiteParserListener) ExitCreate_index_stmt(ctx *Create_index_stmtContext) {} - -// EnterIndexed_column is called when production indexed_column is entered. -func (s *BaseSQLiteParserListener) EnterIndexed_column(ctx *Indexed_columnContext) {} - -// ExitIndexed_column is called when production indexed_column is exited. -func (s *BaseSQLiteParserListener) ExitIndexed_column(ctx *Indexed_columnContext) {} - -// EnterCreate_table_stmt is called when production create_table_stmt is entered. -func (s *BaseSQLiteParserListener) EnterCreate_table_stmt(ctx *Create_table_stmtContext) {} - -// ExitCreate_table_stmt is called when production create_table_stmt is exited. -func (s *BaseSQLiteParserListener) ExitCreate_table_stmt(ctx *Create_table_stmtContext) {} - -// EnterColumn_def is called when production column_def is entered. 
-func (s *BaseSQLiteParserListener) EnterColumn_def(ctx *Column_defContext) {} - -// ExitColumn_def is called when production column_def is exited. -func (s *BaseSQLiteParserListener) ExitColumn_def(ctx *Column_defContext) {} - -// EnterType_name is called when production type_name is entered. -func (s *BaseSQLiteParserListener) EnterType_name(ctx *Type_nameContext) {} - -// ExitType_name is called when production type_name is exited. -func (s *BaseSQLiteParserListener) ExitType_name(ctx *Type_nameContext) {} - -// EnterColumn_constraint is called when production column_constraint is entered. -func (s *BaseSQLiteParserListener) EnterColumn_constraint(ctx *Column_constraintContext) {} - -// ExitColumn_constraint is called when production column_constraint is exited. -func (s *BaseSQLiteParserListener) ExitColumn_constraint(ctx *Column_constraintContext) {} - -// EnterSigned_number is called when production signed_number is entered. -func (s *BaseSQLiteParserListener) EnterSigned_number(ctx *Signed_numberContext) {} - -// ExitSigned_number is called when production signed_number is exited. -func (s *BaseSQLiteParserListener) ExitSigned_number(ctx *Signed_numberContext) {} - -// EnterTable_constraint is called when production table_constraint is entered. -func (s *BaseSQLiteParserListener) EnterTable_constraint(ctx *Table_constraintContext) {} - -// ExitTable_constraint is called when production table_constraint is exited. -func (s *BaseSQLiteParserListener) ExitTable_constraint(ctx *Table_constraintContext) {} - -// EnterForeign_key_clause is called when production foreign_key_clause is entered. -func (s *BaseSQLiteParserListener) EnterForeign_key_clause(ctx *Foreign_key_clauseContext) {} - -// ExitForeign_key_clause is called when production foreign_key_clause is exited. -func (s *BaseSQLiteParserListener) ExitForeign_key_clause(ctx *Foreign_key_clauseContext) {} - -// EnterConflict_clause is called when production conflict_clause is entered. -func (s *BaseSQLiteParserListener) EnterConflict_clause(ctx *Conflict_clauseContext) {} - -// ExitConflict_clause is called when production conflict_clause is exited. -func (s *BaseSQLiteParserListener) ExitConflict_clause(ctx *Conflict_clauseContext) {} - -// EnterCreate_trigger_stmt is called when production create_trigger_stmt is entered. -func (s *BaseSQLiteParserListener) EnterCreate_trigger_stmt(ctx *Create_trigger_stmtContext) {} - -// ExitCreate_trigger_stmt is called when production create_trigger_stmt is exited. -func (s *BaseSQLiteParserListener) ExitCreate_trigger_stmt(ctx *Create_trigger_stmtContext) {} - -// EnterCreate_view_stmt is called when production create_view_stmt is entered. -func (s *BaseSQLiteParserListener) EnterCreate_view_stmt(ctx *Create_view_stmtContext) {} - -// ExitCreate_view_stmt is called when production create_view_stmt is exited. -func (s *BaseSQLiteParserListener) ExitCreate_view_stmt(ctx *Create_view_stmtContext) {} - -// EnterCreate_virtual_table_stmt is called when production create_virtual_table_stmt is entered. -func (s *BaseSQLiteParserListener) EnterCreate_virtual_table_stmt(ctx *Create_virtual_table_stmtContext) { -} - -// ExitCreate_virtual_table_stmt is called when production create_virtual_table_stmt is exited. -func (s *BaseSQLiteParserListener) ExitCreate_virtual_table_stmt(ctx *Create_virtual_table_stmtContext) { -} - -// EnterWith_clause is called when production with_clause is entered. 
-func (s *BaseSQLiteParserListener) EnterWith_clause(ctx *With_clauseContext) {} - -// ExitWith_clause is called when production with_clause is exited. -func (s *BaseSQLiteParserListener) ExitWith_clause(ctx *With_clauseContext) {} - -// EnterCte_table_name is called when production cte_table_name is entered. -func (s *BaseSQLiteParserListener) EnterCte_table_name(ctx *Cte_table_nameContext) {} - -// ExitCte_table_name is called when production cte_table_name is exited. -func (s *BaseSQLiteParserListener) ExitCte_table_name(ctx *Cte_table_nameContext) {} - -// EnterRecursive_cte is called when production recursive_cte is entered. -func (s *BaseSQLiteParserListener) EnterRecursive_cte(ctx *Recursive_cteContext) {} - -// ExitRecursive_cte is called when production recursive_cte is exited. -func (s *BaseSQLiteParserListener) ExitRecursive_cte(ctx *Recursive_cteContext) {} - -// EnterCommon_table_expression is called when production common_table_expression is entered. -func (s *BaseSQLiteParserListener) EnterCommon_table_expression(ctx *Common_table_expressionContext) { -} - -// ExitCommon_table_expression is called when production common_table_expression is exited. -func (s *BaseSQLiteParserListener) ExitCommon_table_expression(ctx *Common_table_expressionContext) {} - -// EnterDelete_stmt is called when production delete_stmt is entered. -func (s *BaseSQLiteParserListener) EnterDelete_stmt(ctx *Delete_stmtContext) {} - -// ExitDelete_stmt is called when production delete_stmt is exited. -func (s *BaseSQLiteParserListener) ExitDelete_stmt(ctx *Delete_stmtContext) {} - -// EnterDelete_stmt_limited is called when production delete_stmt_limited is entered. -func (s *BaseSQLiteParserListener) EnterDelete_stmt_limited(ctx *Delete_stmt_limitedContext) {} - -// ExitDelete_stmt_limited is called when production delete_stmt_limited is exited. -func (s *BaseSQLiteParserListener) ExitDelete_stmt_limited(ctx *Delete_stmt_limitedContext) {} - -// EnterDetach_stmt is called when production detach_stmt is entered. -func (s *BaseSQLiteParserListener) EnterDetach_stmt(ctx *Detach_stmtContext) {} - -// ExitDetach_stmt is called when production detach_stmt is exited. -func (s *BaseSQLiteParserListener) ExitDetach_stmt(ctx *Detach_stmtContext) {} - -// EnterDrop_stmt is called when production drop_stmt is entered. -func (s *BaseSQLiteParserListener) EnterDrop_stmt(ctx *Drop_stmtContext) {} - -// ExitDrop_stmt is called when production drop_stmt is exited. -func (s *BaseSQLiteParserListener) ExitDrop_stmt(ctx *Drop_stmtContext) {} - -// EnterExpr is called when production expr is entered. -func (s *BaseSQLiteParserListener) EnterExpr(ctx *ExprContext) {} - -// ExitExpr is called when production expr is exited. -func (s *BaseSQLiteParserListener) ExitExpr(ctx *ExprContext) {} - -// EnterRaise_function is called when production raise_function is entered. -func (s *BaseSQLiteParserListener) EnterRaise_function(ctx *Raise_functionContext) {} - -// ExitRaise_function is called when production raise_function is exited. -func (s *BaseSQLiteParserListener) ExitRaise_function(ctx *Raise_functionContext) {} - -// EnterLiteral_value is called when production literal_value is entered. -func (s *BaseSQLiteParserListener) EnterLiteral_value(ctx *Literal_valueContext) {} - -// ExitLiteral_value is called when production literal_value is exited. -func (s *BaseSQLiteParserListener) ExitLiteral_value(ctx *Literal_valueContext) {} - -// EnterValue_row is called when production value_row is entered. 
-func (s *BaseSQLiteParserListener) EnterValue_row(ctx *Value_rowContext) {} - -// ExitValue_row is called when production value_row is exited. -func (s *BaseSQLiteParserListener) ExitValue_row(ctx *Value_rowContext) {} - -// EnterValues_clause is called when production values_clause is entered. -func (s *BaseSQLiteParserListener) EnterValues_clause(ctx *Values_clauseContext) {} - -// ExitValues_clause is called when production values_clause is exited. -func (s *BaseSQLiteParserListener) ExitValues_clause(ctx *Values_clauseContext) {} - -// EnterInsert_stmt is called when production insert_stmt is entered. -func (s *BaseSQLiteParserListener) EnterInsert_stmt(ctx *Insert_stmtContext) {} - -// ExitInsert_stmt is called when production insert_stmt is exited. -func (s *BaseSQLiteParserListener) ExitInsert_stmt(ctx *Insert_stmtContext) {} - -// EnterReturning_clause is called when production returning_clause is entered. -func (s *BaseSQLiteParserListener) EnterReturning_clause(ctx *Returning_clauseContext) {} - -// ExitReturning_clause is called when production returning_clause is exited. -func (s *BaseSQLiteParserListener) ExitReturning_clause(ctx *Returning_clauseContext) {} - -// EnterUpsert_clause is called when production upsert_clause is entered. -func (s *BaseSQLiteParserListener) EnterUpsert_clause(ctx *Upsert_clauseContext) {} - -// ExitUpsert_clause is called when production upsert_clause is exited. -func (s *BaseSQLiteParserListener) ExitUpsert_clause(ctx *Upsert_clauseContext) {} - -// EnterPragma_stmt is called when production pragma_stmt is entered. -func (s *BaseSQLiteParserListener) EnterPragma_stmt(ctx *Pragma_stmtContext) {} - -// ExitPragma_stmt is called when production pragma_stmt is exited. -func (s *BaseSQLiteParserListener) ExitPragma_stmt(ctx *Pragma_stmtContext) {} - -// EnterPragma_value is called when production pragma_value is entered. -func (s *BaseSQLiteParserListener) EnterPragma_value(ctx *Pragma_valueContext) {} - -// ExitPragma_value is called when production pragma_value is exited. -func (s *BaseSQLiteParserListener) ExitPragma_value(ctx *Pragma_valueContext) {} - -// EnterReindex_stmt is called when production reindex_stmt is entered. -func (s *BaseSQLiteParserListener) EnterReindex_stmt(ctx *Reindex_stmtContext) {} - -// ExitReindex_stmt is called when production reindex_stmt is exited. -func (s *BaseSQLiteParserListener) ExitReindex_stmt(ctx *Reindex_stmtContext) {} - -// EnterSelect_stmt is called when production select_stmt is entered. -func (s *BaseSQLiteParserListener) EnterSelect_stmt(ctx *Select_stmtContext) {} - -// ExitSelect_stmt is called when production select_stmt is exited. -func (s *BaseSQLiteParserListener) ExitSelect_stmt(ctx *Select_stmtContext) {} - -// EnterJoin_clause is called when production join_clause is entered. -func (s *BaseSQLiteParserListener) EnterJoin_clause(ctx *Join_clauseContext) {} - -// ExitJoin_clause is called when production join_clause is exited. -func (s *BaseSQLiteParserListener) ExitJoin_clause(ctx *Join_clauseContext) {} - -// EnterSelect_core is called when production select_core is entered. -func (s *BaseSQLiteParserListener) EnterSelect_core(ctx *Select_coreContext) {} - -// ExitSelect_core is called when production select_core is exited. -func (s *BaseSQLiteParserListener) ExitSelect_core(ctx *Select_coreContext) {} - -// EnterFactored_select_stmt is called when production factored_select_stmt is entered. 
-func (s *BaseSQLiteParserListener) EnterFactored_select_stmt(ctx *Factored_select_stmtContext) {} - -// ExitFactored_select_stmt is called when production factored_select_stmt is exited. -func (s *BaseSQLiteParserListener) ExitFactored_select_stmt(ctx *Factored_select_stmtContext) {} - -// EnterSimple_select_stmt is called when production simple_select_stmt is entered. -func (s *BaseSQLiteParserListener) EnterSimple_select_stmt(ctx *Simple_select_stmtContext) {} - -// ExitSimple_select_stmt is called when production simple_select_stmt is exited. -func (s *BaseSQLiteParserListener) ExitSimple_select_stmt(ctx *Simple_select_stmtContext) {} - -// EnterCompound_select_stmt is called when production compound_select_stmt is entered. -func (s *BaseSQLiteParserListener) EnterCompound_select_stmt(ctx *Compound_select_stmtContext) {} - -// ExitCompound_select_stmt is called when production compound_select_stmt is exited. -func (s *BaseSQLiteParserListener) ExitCompound_select_stmt(ctx *Compound_select_stmtContext) {} - -// EnterTable_or_subquery is called when production table_or_subquery is entered. -func (s *BaseSQLiteParserListener) EnterTable_or_subquery(ctx *Table_or_subqueryContext) {} - -// ExitTable_or_subquery is called when production table_or_subquery is exited. -func (s *BaseSQLiteParserListener) ExitTable_or_subquery(ctx *Table_or_subqueryContext) {} - -// EnterResult_column is called when production result_column is entered. -func (s *BaseSQLiteParserListener) EnterResult_column(ctx *Result_columnContext) {} - -// ExitResult_column is called when production result_column is exited. -func (s *BaseSQLiteParserListener) ExitResult_column(ctx *Result_columnContext) {} - -// EnterJoin_operator is called when production join_operator is entered. -func (s *BaseSQLiteParserListener) EnterJoin_operator(ctx *Join_operatorContext) {} - -// ExitJoin_operator is called when production join_operator is exited. -func (s *BaseSQLiteParserListener) ExitJoin_operator(ctx *Join_operatorContext) {} - -// EnterJoin_constraint is called when production join_constraint is entered. -func (s *BaseSQLiteParserListener) EnterJoin_constraint(ctx *Join_constraintContext) {} - -// ExitJoin_constraint is called when production join_constraint is exited. -func (s *BaseSQLiteParserListener) ExitJoin_constraint(ctx *Join_constraintContext) {} - -// EnterCompound_operator is called when production compound_operator is entered. -func (s *BaseSQLiteParserListener) EnterCompound_operator(ctx *Compound_operatorContext) {} - -// ExitCompound_operator is called when production compound_operator is exited. -func (s *BaseSQLiteParserListener) ExitCompound_operator(ctx *Compound_operatorContext) {} - -// EnterUpdate_stmt is called when production update_stmt is entered. -func (s *BaseSQLiteParserListener) EnterUpdate_stmt(ctx *Update_stmtContext) {} - -// ExitUpdate_stmt is called when production update_stmt is exited. -func (s *BaseSQLiteParserListener) ExitUpdate_stmt(ctx *Update_stmtContext) {} - -// EnterColumn_name_list is called when production column_name_list is entered. -func (s *BaseSQLiteParserListener) EnterColumn_name_list(ctx *Column_name_listContext) {} - -// ExitColumn_name_list is called when production column_name_list is exited. -func (s *BaseSQLiteParserListener) ExitColumn_name_list(ctx *Column_name_listContext) {} - -// EnterUpdate_stmt_limited is called when production update_stmt_limited is entered. 
-func (s *BaseSQLiteParserListener) EnterUpdate_stmt_limited(ctx *Update_stmt_limitedContext) {} - -// ExitUpdate_stmt_limited is called when production update_stmt_limited is exited. -func (s *BaseSQLiteParserListener) ExitUpdate_stmt_limited(ctx *Update_stmt_limitedContext) {} - -// EnterQualified_table_name is called when production qualified_table_name is entered. -func (s *BaseSQLiteParserListener) EnterQualified_table_name(ctx *Qualified_table_nameContext) {} - -// ExitQualified_table_name is called when production qualified_table_name is exited. -func (s *BaseSQLiteParserListener) ExitQualified_table_name(ctx *Qualified_table_nameContext) {} - -// EnterVacuum_stmt is called when production vacuum_stmt is entered. -func (s *BaseSQLiteParserListener) EnterVacuum_stmt(ctx *Vacuum_stmtContext) {} - -// ExitVacuum_stmt is called when production vacuum_stmt is exited. -func (s *BaseSQLiteParserListener) ExitVacuum_stmt(ctx *Vacuum_stmtContext) {} - -// EnterFilter_clause is called when production filter_clause is entered. -func (s *BaseSQLiteParserListener) EnterFilter_clause(ctx *Filter_clauseContext) {} - -// ExitFilter_clause is called when production filter_clause is exited. -func (s *BaseSQLiteParserListener) ExitFilter_clause(ctx *Filter_clauseContext) {} - -// EnterWindow_defn is called when production window_defn is entered. -func (s *BaseSQLiteParserListener) EnterWindow_defn(ctx *Window_defnContext) {} - -// ExitWindow_defn is called when production window_defn is exited. -func (s *BaseSQLiteParserListener) ExitWindow_defn(ctx *Window_defnContext) {} - -// EnterOver_clause is called when production over_clause is entered. -func (s *BaseSQLiteParserListener) EnterOver_clause(ctx *Over_clauseContext) {} - -// ExitOver_clause is called when production over_clause is exited. -func (s *BaseSQLiteParserListener) ExitOver_clause(ctx *Over_clauseContext) {} - -// EnterFrame_spec is called when production frame_spec is entered. -func (s *BaseSQLiteParserListener) EnterFrame_spec(ctx *Frame_specContext) {} - -// ExitFrame_spec is called when production frame_spec is exited. -func (s *BaseSQLiteParserListener) ExitFrame_spec(ctx *Frame_specContext) {} - -// EnterFrame_clause is called when production frame_clause is entered. -func (s *BaseSQLiteParserListener) EnterFrame_clause(ctx *Frame_clauseContext) {} - -// ExitFrame_clause is called when production frame_clause is exited. -func (s *BaseSQLiteParserListener) ExitFrame_clause(ctx *Frame_clauseContext) {} - -// EnterSimple_function_invocation is called when production simple_function_invocation is entered. -func (s *BaseSQLiteParserListener) EnterSimple_function_invocation(ctx *Simple_function_invocationContext) { -} - -// ExitSimple_function_invocation is called when production simple_function_invocation is exited. -func (s *BaseSQLiteParserListener) ExitSimple_function_invocation(ctx *Simple_function_invocationContext) { -} - -// EnterAggregate_function_invocation is called when production aggregate_function_invocation is entered. -func (s *BaseSQLiteParserListener) EnterAggregate_function_invocation(ctx *Aggregate_function_invocationContext) { -} - -// ExitAggregate_function_invocation is called when production aggregate_function_invocation is exited. -func (s *BaseSQLiteParserListener) ExitAggregate_function_invocation(ctx *Aggregate_function_invocationContext) { -} - -// EnterWindow_function_invocation is called when production window_function_invocation is entered. 
-func (s *BaseSQLiteParserListener) EnterWindow_function_invocation(ctx *Window_function_invocationContext) { -} - -// ExitWindow_function_invocation is called when production window_function_invocation is exited. -func (s *BaseSQLiteParserListener) ExitWindow_function_invocation(ctx *Window_function_invocationContext) { -} - -// EnterCommon_table_stmt is called when production common_table_stmt is entered. -func (s *BaseSQLiteParserListener) EnterCommon_table_stmt(ctx *Common_table_stmtContext) {} - -// ExitCommon_table_stmt is called when production common_table_stmt is exited. -func (s *BaseSQLiteParserListener) ExitCommon_table_stmt(ctx *Common_table_stmtContext) {} - -// EnterOrder_by_stmt is called when production order_by_stmt is entered. -func (s *BaseSQLiteParserListener) EnterOrder_by_stmt(ctx *Order_by_stmtContext) {} - -// ExitOrder_by_stmt is called when production order_by_stmt is exited. -func (s *BaseSQLiteParserListener) ExitOrder_by_stmt(ctx *Order_by_stmtContext) {} - -// EnterLimit_stmt is called when production limit_stmt is entered. -func (s *BaseSQLiteParserListener) EnterLimit_stmt(ctx *Limit_stmtContext) {} - -// ExitLimit_stmt is called when production limit_stmt is exited. -func (s *BaseSQLiteParserListener) ExitLimit_stmt(ctx *Limit_stmtContext) {} - -// EnterOrdering_term is called when production ordering_term is entered. -func (s *BaseSQLiteParserListener) EnterOrdering_term(ctx *Ordering_termContext) {} - -// ExitOrdering_term is called when production ordering_term is exited. -func (s *BaseSQLiteParserListener) ExitOrdering_term(ctx *Ordering_termContext) {} - -// EnterAsc_desc is called when production asc_desc is entered. -func (s *BaseSQLiteParserListener) EnterAsc_desc(ctx *Asc_descContext) {} - -// ExitAsc_desc is called when production asc_desc is exited. -func (s *BaseSQLiteParserListener) ExitAsc_desc(ctx *Asc_descContext) {} - -// EnterFrame_left is called when production frame_left is entered. -func (s *BaseSQLiteParserListener) EnterFrame_left(ctx *Frame_leftContext) {} - -// ExitFrame_left is called when production frame_left is exited. -func (s *BaseSQLiteParserListener) ExitFrame_left(ctx *Frame_leftContext) {} - -// EnterFrame_right is called when production frame_right is entered. -func (s *BaseSQLiteParserListener) EnterFrame_right(ctx *Frame_rightContext) {} - -// ExitFrame_right is called when production frame_right is exited. -func (s *BaseSQLiteParserListener) ExitFrame_right(ctx *Frame_rightContext) {} - -// EnterFrame_single is called when production frame_single is entered. -func (s *BaseSQLiteParserListener) EnterFrame_single(ctx *Frame_singleContext) {} - -// ExitFrame_single is called when production frame_single is exited. -func (s *BaseSQLiteParserListener) ExitFrame_single(ctx *Frame_singleContext) {} - -// EnterWindow_function is called when production window_function is entered. -func (s *BaseSQLiteParserListener) EnterWindow_function(ctx *Window_functionContext) {} - -// ExitWindow_function is called when production window_function is exited. -func (s *BaseSQLiteParserListener) ExitWindow_function(ctx *Window_functionContext) {} - -// EnterOffset is called when production offset is entered. -func (s *BaseSQLiteParserListener) EnterOffset(ctx *OffsetContext) {} - -// ExitOffset is called when production offset is exited. -func (s *BaseSQLiteParserListener) ExitOffset(ctx *OffsetContext) {} - -// EnterDefault_value is called when production default_value is entered. 
-func (s *BaseSQLiteParserListener) EnterDefault_value(ctx *Default_valueContext) {} - -// ExitDefault_value is called when production default_value is exited. -func (s *BaseSQLiteParserListener) ExitDefault_value(ctx *Default_valueContext) {} - -// EnterPartition_by is called when production partition_by is entered. -func (s *BaseSQLiteParserListener) EnterPartition_by(ctx *Partition_byContext) {} - -// ExitPartition_by is called when production partition_by is exited. -func (s *BaseSQLiteParserListener) ExitPartition_by(ctx *Partition_byContext) {} - -// EnterOrder_by_expr is called when production order_by_expr is entered. -func (s *BaseSQLiteParserListener) EnterOrder_by_expr(ctx *Order_by_exprContext) {} - -// ExitOrder_by_expr is called when production order_by_expr is exited. -func (s *BaseSQLiteParserListener) ExitOrder_by_expr(ctx *Order_by_exprContext) {} - -// EnterOrder_by_expr_asc_desc is called when production order_by_expr_asc_desc is entered. -func (s *BaseSQLiteParserListener) EnterOrder_by_expr_asc_desc(ctx *Order_by_expr_asc_descContext) {} - -// ExitOrder_by_expr_asc_desc is called when production order_by_expr_asc_desc is exited. -func (s *BaseSQLiteParserListener) ExitOrder_by_expr_asc_desc(ctx *Order_by_expr_asc_descContext) {} - -// EnterExpr_asc_desc is called when production expr_asc_desc is entered. -func (s *BaseSQLiteParserListener) EnterExpr_asc_desc(ctx *Expr_asc_descContext) {} - -// ExitExpr_asc_desc is called when production expr_asc_desc is exited. -func (s *BaseSQLiteParserListener) ExitExpr_asc_desc(ctx *Expr_asc_descContext) {} - -// EnterInitial_select is called when production initial_select is entered. -func (s *BaseSQLiteParserListener) EnterInitial_select(ctx *Initial_selectContext) {} - -// ExitInitial_select is called when production initial_select is exited. -func (s *BaseSQLiteParserListener) ExitInitial_select(ctx *Initial_selectContext) {} - -// EnterRecursive_select is called when production recursive_select is entered. -func (s *BaseSQLiteParserListener) EnterRecursive_select(ctx *Recursive_selectContext) {} - -// ExitRecursive_select is called when production recursive_select is exited. -func (s *BaseSQLiteParserListener) ExitRecursive_select(ctx *Recursive_selectContext) {} - -// EnterUnary_operator is called when production unary_operator is entered. -func (s *BaseSQLiteParserListener) EnterUnary_operator(ctx *Unary_operatorContext) {} - -// ExitUnary_operator is called when production unary_operator is exited. -func (s *BaseSQLiteParserListener) ExitUnary_operator(ctx *Unary_operatorContext) {} - -// EnterError_message is called when production error_message is entered. -func (s *BaseSQLiteParserListener) EnterError_message(ctx *Error_messageContext) {} - -// ExitError_message is called when production error_message is exited. -func (s *BaseSQLiteParserListener) ExitError_message(ctx *Error_messageContext) {} - -// EnterModule_argument is called when production module_argument is entered. -func (s *BaseSQLiteParserListener) EnterModule_argument(ctx *Module_argumentContext) {} - -// ExitModule_argument is called when production module_argument is exited. -func (s *BaseSQLiteParserListener) ExitModule_argument(ctx *Module_argumentContext) {} - -// EnterColumn_alias is called when production column_alias is entered. -func (s *BaseSQLiteParserListener) EnterColumn_alias(ctx *Column_aliasContext) {} - -// ExitColumn_alias is called when production column_alias is exited. 
-func (s *BaseSQLiteParserListener) ExitColumn_alias(ctx *Column_aliasContext) {} - -// EnterKeyword is called when production keyword is entered. -func (s *BaseSQLiteParserListener) EnterKeyword(ctx *KeywordContext) {} - -// ExitKeyword is called when production keyword is exited. -func (s *BaseSQLiteParserListener) ExitKeyword(ctx *KeywordContext) {} - -// EnterName is called when production name is entered. -func (s *BaseSQLiteParserListener) EnterName(ctx *NameContext) {} - -// ExitName is called when production name is exited. -func (s *BaseSQLiteParserListener) ExitName(ctx *NameContext) {} - -// EnterFunction_name is called when production function_name is entered. -func (s *BaseSQLiteParserListener) EnterFunction_name(ctx *Function_nameContext) {} - -// ExitFunction_name is called when production function_name is exited. -func (s *BaseSQLiteParserListener) ExitFunction_name(ctx *Function_nameContext) {} - -// EnterSchema_name is called when production schema_name is entered. -func (s *BaseSQLiteParserListener) EnterSchema_name(ctx *Schema_nameContext) {} - -// ExitSchema_name is called when production schema_name is exited. -func (s *BaseSQLiteParserListener) ExitSchema_name(ctx *Schema_nameContext) {} - -// EnterTable_name is called when production table_name is entered. -func (s *BaseSQLiteParserListener) EnterTable_name(ctx *Table_nameContext) {} - -// ExitTable_name is called when production table_name is exited. -func (s *BaseSQLiteParserListener) ExitTable_name(ctx *Table_nameContext) {} - -// EnterTable_or_index_name is called when production table_or_index_name is entered. -func (s *BaseSQLiteParserListener) EnterTable_or_index_name(ctx *Table_or_index_nameContext) {} - -// ExitTable_or_index_name is called when production table_or_index_name is exited. -func (s *BaseSQLiteParserListener) ExitTable_or_index_name(ctx *Table_or_index_nameContext) {} - -// EnterColumn_name is called when production column_name is entered. -func (s *BaseSQLiteParserListener) EnterColumn_name(ctx *Column_nameContext) {} - -// ExitColumn_name is called when production column_name is exited. -func (s *BaseSQLiteParserListener) ExitColumn_name(ctx *Column_nameContext) {} - -// EnterCollation_name is called when production collation_name is entered. -func (s *BaseSQLiteParserListener) EnterCollation_name(ctx *Collation_nameContext) {} - -// ExitCollation_name is called when production collation_name is exited. -func (s *BaseSQLiteParserListener) ExitCollation_name(ctx *Collation_nameContext) {} - -// EnterForeign_table is called when production foreign_table is entered. -func (s *BaseSQLiteParserListener) EnterForeign_table(ctx *Foreign_tableContext) {} - -// ExitForeign_table is called when production foreign_table is exited. -func (s *BaseSQLiteParserListener) ExitForeign_table(ctx *Foreign_tableContext) {} - -// EnterIndex_name is called when production index_name is entered. -func (s *BaseSQLiteParserListener) EnterIndex_name(ctx *Index_nameContext) {} - -// ExitIndex_name is called when production index_name is exited. -func (s *BaseSQLiteParserListener) ExitIndex_name(ctx *Index_nameContext) {} - -// EnterTrigger_name is called when production trigger_name is entered. -func (s *BaseSQLiteParserListener) EnterTrigger_name(ctx *Trigger_nameContext) {} - -// ExitTrigger_name is called when production trigger_name is exited. -func (s *BaseSQLiteParserListener) ExitTrigger_name(ctx *Trigger_nameContext) {} - -// EnterView_name is called when production view_name is entered. 
-func (s *BaseSQLiteParserListener) EnterView_name(ctx *View_nameContext) {} - -// ExitView_name is called when production view_name is exited. -func (s *BaseSQLiteParserListener) ExitView_name(ctx *View_nameContext) {} - -// EnterModule_name is called when production module_name is entered. -func (s *BaseSQLiteParserListener) EnterModule_name(ctx *Module_nameContext) {} - -// ExitModule_name is called when production module_name is exited. -func (s *BaseSQLiteParserListener) ExitModule_name(ctx *Module_nameContext) {} - -// EnterPragma_name is called when production pragma_name is entered. -func (s *BaseSQLiteParserListener) EnterPragma_name(ctx *Pragma_nameContext) {} - -// ExitPragma_name is called when production pragma_name is exited. -func (s *BaseSQLiteParserListener) ExitPragma_name(ctx *Pragma_nameContext) {} - -// EnterSavepoint_name is called when production savepoint_name is entered. -func (s *BaseSQLiteParserListener) EnterSavepoint_name(ctx *Savepoint_nameContext) {} - -// ExitSavepoint_name is called when production savepoint_name is exited. -func (s *BaseSQLiteParserListener) ExitSavepoint_name(ctx *Savepoint_nameContext) {} - -// EnterTable_alias is called when production table_alias is entered. -func (s *BaseSQLiteParserListener) EnterTable_alias(ctx *Table_aliasContext) {} - -// ExitTable_alias is called when production table_alias is exited. -func (s *BaseSQLiteParserListener) ExitTable_alias(ctx *Table_aliasContext) {} - -// EnterTransaction_name is called when production transaction_name is entered. -func (s *BaseSQLiteParserListener) EnterTransaction_name(ctx *Transaction_nameContext) {} - -// ExitTransaction_name is called when production transaction_name is exited. -func (s *BaseSQLiteParserListener) ExitTransaction_name(ctx *Transaction_nameContext) {} - -// EnterWindow_name is called when production window_name is entered. -func (s *BaseSQLiteParserListener) EnterWindow_name(ctx *Window_nameContext) {} - -// ExitWindow_name is called when production window_name is exited. -func (s *BaseSQLiteParserListener) ExitWindow_name(ctx *Window_nameContext) {} - -// EnterAlias is called when production alias is entered. -func (s *BaseSQLiteParserListener) EnterAlias(ctx *AliasContext) {} - -// ExitAlias is called when production alias is exited. -func (s *BaseSQLiteParserListener) ExitAlias(ctx *AliasContext) {} - -// EnterFilename is called when production filename is entered. -func (s *BaseSQLiteParserListener) EnterFilename(ctx *FilenameContext) {} - -// ExitFilename is called when production filename is exited. -func (s *BaseSQLiteParserListener) ExitFilename(ctx *FilenameContext) {} - -// EnterBase_window_name is called when production base_window_name is entered. -func (s *BaseSQLiteParserListener) EnterBase_window_name(ctx *Base_window_nameContext) {} - -// ExitBase_window_name is called when production base_window_name is exited. -func (s *BaseSQLiteParserListener) ExitBase_window_name(ctx *Base_window_nameContext) {} - -// EnterSimple_func is called when production simple_func is entered. -func (s *BaseSQLiteParserListener) EnterSimple_func(ctx *Simple_funcContext) {} - -// ExitSimple_func is called when production simple_func is exited. -func (s *BaseSQLiteParserListener) ExitSimple_func(ctx *Simple_funcContext) {} - -// EnterAggregate_func is called when production aggregate_func is entered. -func (s *BaseSQLiteParserListener) EnterAggregate_func(ctx *Aggregate_funcContext) {} - -// ExitAggregate_func is called when production aggregate_func is exited. 
-func (s *BaseSQLiteParserListener) ExitAggregate_func(ctx *Aggregate_funcContext) {} - -// EnterTable_function_name is called when production table_function_name is entered. -func (s *BaseSQLiteParserListener) EnterTable_function_name(ctx *Table_function_nameContext) {} - -// ExitTable_function_name is called when production table_function_name is exited. -func (s *BaseSQLiteParserListener) ExitTable_function_name(ctx *Table_function_nameContext) {} - -// EnterAny_name is called when production any_name is entered. -func (s *BaseSQLiteParserListener) EnterAny_name(ctx *Any_nameContext) {} - -// ExitAny_name is called when production any_name is exited. -func (s *BaseSQLiteParserListener) ExitAny_name(ctx *Any_nameContext) {} +// Code generated from SQLiteParser.g4 by ANTLR 4.12.0. DO NOT EDIT. + +package sqliteparser // SQLiteParser +import "github.com/antlr/antlr4/runtime/Go/antlr/v4" + +// BaseSQLiteParserListener is a complete listener for a parse tree produced by SQLiteParser. +type BaseSQLiteParserListener struct{} + +var _ SQLiteParserListener = &BaseSQLiteParserListener{} + +// VisitTerminal is called when a terminal node is visited. +func (s *BaseSQLiteParserListener) VisitTerminal(node antlr.TerminalNode) {} + +// VisitErrorNode is called when an error node is visited. +func (s *BaseSQLiteParserListener) VisitErrorNode(node antlr.ErrorNode) {} + +// EnterEveryRule is called when any rule is entered. +func (s *BaseSQLiteParserListener) EnterEveryRule(ctx antlr.ParserRuleContext) {} + +// ExitEveryRule is called when any rule is exited. +func (s *BaseSQLiteParserListener) ExitEveryRule(ctx antlr.ParserRuleContext) {} + +// EnterParse is called when production parse is entered. +func (s *BaseSQLiteParserListener) EnterParse(ctx *ParseContext) {} + +// ExitParse is called when production parse is exited. +func (s *BaseSQLiteParserListener) ExitParse(ctx *ParseContext) {} + +// EnterSql_stmt_list is called when production sql_stmt_list is entered. +func (s *BaseSQLiteParserListener) EnterSql_stmt_list(ctx *Sql_stmt_listContext) {} + +// ExitSql_stmt_list is called when production sql_stmt_list is exited. +func (s *BaseSQLiteParserListener) ExitSql_stmt_list(ctx *Sql_stmt_listContext) {} + +// EnterSql_stmt is called when production sql_stmt is entered. +func (s *BaseSQLiteParserListener) EnterSql_stmt(ctx *Sql_stmtContext) {} + +// ExitSql_stmt is called when production sql_stmt is exited. +func (s *BaseSQLiteParserListener) ExitSql_stmt(ctx *Sql_stmtContext) {} + +// EnterAlter_table_stmt is called when production alter_table_stmt is entered. +func (s *BaseSQLiteParserListener) EnterAlter_table_stmt(ctx *Alter_table_stmtContext) {} + +// ExitAlter_table_stmt is called when production alter_table_stmt is exited. +func (s *BaseSQLiteParserListener) ExitAlter_table_stmt(ctx *Alter_table_stmtContext) {} + +// EnterAnalyze_stmt is called when production analyze_stmt is entered. +func (s *BaseSQLiteParserListener) EnterAnalyze_stmt(ctx *Analyze_stmtContext) {} + +// ExitAnalyze_stmt is called when production analyze_stmt is exited. +func (s *BaseSQLiteParserListener) ExitAnalyze_stmt(ctx *Analyze_stmtContext) {} + +// EnterAttach_stmt is called when production attach_stmt is entered. +func (s *BaseSQLiteParserListener) EnterAttach_stmt(ctx *Attach_stmtContext) {} + +// ExitAttach_stmt is called when production attach_stmt is exited. +func (s *BaseSQLiteParserListener) ExitAttach_stmt(ctx *Attach_stmtContext) {} + +// EnterBegin_stmt is called when production begin_stmt is entered. 
+func (s *BaseSQLiteParserListener) EnterBegin_stmt(ctx *Begin_stmtContext) {} + +// ExitBegin_stmt is called when production begin_stmt is exited. +func (s *BaseSQLiteParserListener) ExitBegin_stmt(ctx *Begin_stmtContext) {} + +// EnterCommit_stmt is called when production commit_stmt is entered. +func (s *BaseSQLiteParserListener) EnterCommit_stmt(ctx *Commit_stmtContext) {} + +// ExitCommit_stmt is called when production commit_stmt is exited. +func (s *BaseSQLiteParserListener) ExitCommit_stmt(ctx *Commit_stmtContext) {} + +// EnterRollback_stmt is called when production rollback_stmt is entered. +func (s *BaseSQLiteParserListener) EnterRollback_stmt(ctx *Rollback_stmtContext) {} + +// ExitRollback_stmt is called when production rollback_stmt is exited. +func (s *BaseSQLiteParserListener) ExitRollback_stmt(ctx *Rollback_stmtContext) {} + +// EnterSavepoint_stmt is called when production savepoint_stmt is entered. +func (s *BaseSQLiteParserListener) EnterSavepoint_stmt(ctx *Savepoint_stmtContext) {} + +// ExitSavepoint_stmt is called when production savepoint_stmt is exited. +func (s *BaseSQLiteParserListener) ExitSavepoint_stmt(ctx *Savepoint_stmtContext) {} + +// EnterRelease_stmt is called when production release_stmt is entered. +func (s *BaseSQLiteParserListener) EnterRelease_stmt(ctx *Release_stmtContext) {} + +// ExitRelease_stmt is called when production release_stmt is exited. +func (s *BaseSQLiteParserListener) ExitRelease_stmt(ctx *Release_stmtContext) {} + +// EnterCreate_index_stmt is called when production create_index_stmt is entered. +func (s *BaseSQLiteParserListener) EnterCreate_index_stmt(ctx *Create_index_stmtContext) {} + +// ExitCreate_index_stmt is called when production create_index_stmt is exited. +func (s *BaseSQLiteParserListener) ExitCreate_index_stmt(ctx *Create_index_stmtContext) {} + +// EnterIndexed_column is called when production indexed_column is entered. +func (s *BaseSQLiteParserListener) EnterIndexed_column(ctx *Indexed_columnContext) {} + +// ExitIndexed_column is called when production indexed_column is exited. +func (s *BaseSQLiteParserListener) ExitIndexed_column(ctx *Indexed_columnContext) {} + +// EnterCreate_table_stmt is called when production create_table_stmt is entered. +func (s *BaseSQLiteParserListener) EnterCreate_table_stmt(ctx *Create_table_stmtContext) {} + +// ExitCreate_table_stmt is called when production create_table_stmt is exited. +func (s *BaseSQLiteParserListener) ExitCreate_table_stmt(ctx *Create_table_stmtContext) {} + +// EnterColumn_def is called when production column_def is entered. +func (s *BaseSQLiteParserListener) EnterColumn_def(ctx *Column_defContext) {} + +// ExitColumn_def is called when production column_def is exited. +func (s *BaseSQLiteParserListener) ExitColumn_def(ctx *Column_defContext) {} + +// EnterType_name is called when production type_name is entered. +func (s *BaseSQLiteParserListener) EnterType_name(ctx *Type_nameContext) {} + +// ExitType_name is called when production type_name is exited. +func (s *BaseSQLiteParserListener) ExitType_name(ctx *Type_nameContext) {} + +// EnterColumn_constraint is called when production column_constraint is entered. +func (s *BaseSQLiteParserListener) EnterColumn_constraint(ctx *Column_constraintContext) {} + +// ExitColumn_constraint is called when production column_constraint is exited. +func (s *BaseSQLiteParserListener) ExitColumn_constraint(ctx *Column_constraintContext) {} + +// EnterSigned_number is called when production signed_number is entered. 
+func (s *BaseSQLiteParserListener) EnterSigned_number(ctx *Signed_numberContext) {} + +// ExitSigned_number is called when production signed_number is exited. +func (s *BaseSQLiteParserListener) ExitSigned_number(ctx *Signed_numberContext) {} + +// EnterTable_constraint is called when production table_constraint is entered. +func (s *BaseSQLiteParserListener) EnterTable_constraint(ctx *Table_constraintContext) {} + +// ExitTable_constraint is called when production table_constraint is exited. +func (s *BaseSQLiteParserListener) ExitTable_constraint(ctx *Table_constraintContext) {} + +// EnterForeign_key_clause is called when production foreign_key_clause is entered. +func (s *BaseSQLiteParserListener) EnterForeign_key_clause(ctx *Foreign_key_clauseContext) {} + +// ExitForeign_key_clause is called when production foreign_key_clause is exited. +func (s *BaseSQLiteParserListener) ExitForeign_key_clause(ctx *Foreign_key_clauseContext) {} + +// EnterConflict_clause is called when production conflict_clause is entered. +func (s *BaseSQLiteParserListener) EnterConflict_clause(ctx *Conflict_clauseContext) {} + +// ExitConflict_clause is called when production conflict_clause is exited. +func (s *BaseSQLiteParserListener) ExitConflict_clause(ctx *Conflict_clauseContext) {} + +// EnterCreate_trigger_stmt is called when production create_trigger_stmt is entered. +func (s *BaseSQLiteParserListener) EnterCreate_trigger_stmt(ctx *Create_trigger_stmtContext) {} + +// ExitCreate_trigger_stmt is called when production create_trigger_stmt is exited. +func (s *BaseSQLiteParserListener) ExitCreate_trigger_stmt(ctx *Create_trigger_stmtContext) {} + +// EnterCreate_view_stmt is called when production create_view_stmt is entered. +func (s *BaseSQLiteParserListener) EnterCreate_view_stmt(ctx *Create_view_stmtContext) {} + +// ExitCreate_view_stmt is called when production create_view_stmt is exited. +func (s *BaseSQLiteParserListener) ExitCreate_view_stmt(ctx *Create_view_stmtContext) {} + +// EnterCreate_virtual_table_stmt is called when production create_virtual_table_stmt is entered. +func (s *BaseSQLiteParserListener) EnterCreate_virtual_table_stmt(ctx *Create_virtual_table_stmtContext) { +} + +// ExitCreate_virtual_table_stmt is called when production create_virtual_table_stmt is exited. +func (s *BaseSQLiteParserListener) ExitCreate_virtual_table_stmt(ctx *Create_virtual_table_stmtContext) { +} + +// EnterWith_clause is called when production with_clause is entered. +func (s *BaseSQLiteParserListener) EnterWith_clause(ctx *With_clauseContext) {} + +// ExitWith_clause is called when production with_clause is exited. +func (s *BaseSQLiteParserListener) ExitWith_clause(ctx *With_clauseContext) {} + +// EnterCte_table_name is called when production cte_table_name is entered. +func (s *BaseSQLiteParserListener) EnterCte_table_name(ctx *Cte_table_nameContext) {} + +// ExitCte_table_name is called when production cte_table_name is exited. +func (s *BaseSQLiteParserListener) ExitCte_table_name(ctx *Cte_table_nameContext) {} + +// EnterRecursive_cte is called when production recursive_cte is entered. +func (s *BaseSQLiteParserListener) EnterRecursive_cte(ctx *Recursive_cteContext) {} + +// ExitRecursive_cte is called when production recursive_cte is exited. +func (s *BaseSQLiteParserListener) ExitRecursive_cte(ctx *Recursive_cteContext) {} + +// EnterCommon_table_expression is called when production common_table_expression is entered. 
+func (s *BaseSQLiteParserListener) EnterCommon_table_expression(ctx *Common_table_expressionContext) { +} + +// ExitCommon_table_expression is called when production common_table_expression is exited. +func (s *BaseSQLiteParserListener) ExitCommon_table_expression(ctx *Common_table_expressionContext) {} + +// EnterDelete_stmt is called when production delete_stmt is entered. +func (s *BaseSQLiteParserListener) EnterDelete_stmt(ctx *Delete_stmtContext) {} + +// ExitDelete_stmt is called when production delete_stmt is exited. +func (s *BaseSQLiteParserListener) ExitDelete_stmt(ctx *Delete_stmtContext) {} + +// EnterDelete_stmt_limited is called when production delete_stmt_limited is entered. +func (s *BaseSQLiteParserListener) EnterDelete_stmt_limited(ctx *Delete_stmt_limitedContext) {} + +// ExitDelete_stmt_limited is called when production delete_stmt_limited is exited. +func (s *BaseSQLiteParserListener) ExitDelete_stmt_limited(ctx *Delete_stmt_limitedContext) {} + +// EnterDetach_stmt is called when production detach_stmt is entered. +func (s *BaseSQLiteParserListener) EnterDetach_stmt(ctx *Detach_stmtContext) {} + +// ExitDetach_stmt is called when production detach_stmt is exited. +func (s *BaseSQLiteParserListener) ExitDetach_stmt(ctx *Detach_stmtContext) {} + +// EnterDrop_stmt is called when production drop_stmt is entered. +func (s *BaseSQLiteParserListener) EnterDrop_stmt(ctx *Drop_stmtContext) {} + +// ExitDrop_stmt is called when production drop_stmt is exited. +func (s *BaseSQLiteParserListener) ExitDrop_stmt(ctx *Drop_stmtContext) {} + +// EnterExpr is called when production expr is entered. +func (s *BaseSQLiteParserListener) EnterExpr(ctx *ExprContext) {} + +// ExitExpr is called when production expr is exited. +func (s *BaseSQLiteParserListener) ExitExpr(ctx *ExprContext) {} + +// EnterRaise_function is called when production raise_function is entered. +func (s *BaseSQLiteParserListener) EnterRaise_function(ctx *Raise_functionContext) {} + +// ExitRaise_function is called when production raise_function is exited. +func (s *BaseSQLiteParserListener) ExitRaise_function(ctx *Raise_functionContext) {} + +// EnterLiteral_value is called when production literal_value is entered. +func (s *BaseSQLiteParserListener) EnterLiteral_value(ctx *Literal_valueContext) {} + +// ExitLiteral_value is called when production literal_value is exited. +func (s *BaseSQLiteParserListener) ExitLiteral_value(ctx *Literal_valueContext) {} + +// EnterValue_row is called when production value_row is entered. +func (s *BaseSQLiteParserListener) EnterValue_row(ctx *Value_rowContext) {} + +// ExitValue_row is called when production value_row is exited. +func (s *BaseSQLiteParserListener) ExitValue_row(ctx *Value_rowContext) {} + +// EnterValues_clause is called when production values_clause is entered. +func (s *BaseSQLiteParserListener) EnterValues_clause(ctx *Values_clauseContext) {} + +// ExitValues_clause is called when production values_clause is exited. +func (s *BaseSQLiteParserListener) ExitValues_clause(ctx *Values_clauseContext) {} + +// EnterInsert_stmt is called when production insert_stmt is entered. +func (s *BaseSQLiteParserListener) EnterInsert_stmt(ctx *Insert_stmtContext) {} + +// ExitInsert_stmt is called when production insert_stmt is exited. +func (s *BaseSQLiteParserListener) ExitInsert_stmt(ctx *Insert_stmtContext) {} + +// EnterReturning_clause is called when production returning_clause is entered. 
+func (s *BaseSQLiteParserListener) EnterReturning_clause(ctx *Returning_clauseContext) {} + +// ExitReturning_clause is called when production returning_clause is exited. +func (s *BaseSQLiteParserListener) ExitReturning_clause(ctx *Returning_clauseContext) {} + +// EnterUpsert_clause is called when production upsert_clause is entered. +func (s *BaseSQLiteParserListener) EnterUpsert_clause(ctx *Upsert_clauseContext) {} + +// ExitUpsert_clause is called when production upsert_clause is exited. +func (s *BaseSQLiteParserListener) ExitUpsert_clause(ctx *Upsert_clauseContext) {} + +// EnterPragma_stmt is called when production pragma_stmt is entered. +func (s *BaseSQLiteParserListener) EnterPragma_stmt(ctx *Pragma_stmtContext) {} + +// ExitPragma_stmt is called when production pragma_stmt is exited. +func (s *BaseSQLiteParserListener) ExitPragma_stmt(ctx *Pragma_stmtContext) {} + +// EnterPragma_value is called when production pragma_value is entered. +func (s *BaseSQLiteParserListener) EnterPragma_value(ctx *Pragma_valueContext) {} + +// ExitPragma_value is called when production pragma_value is exited. +func (s *BaseSQLiteParserListener) ExitPragma_value(ctx *Pragma_valueContext) {} + +// EnterReindex_stmt is called when production reindex_stmt is entered. +func (s *BaseSQLiteParserListener) EnterReindex_stmt(ctx *Reindex_stmtContext) {} + +// ExitReindex_stmt is called when production reindex_stmt is exited. +func (s *BaseSQLiteParserListener) ExitReindex_stmt(ctx *Reindex_stmtContext) {} + +// EnterSelect_stmt is called when production select_stmt is entered. +func (s *BaseSQLiteParserListener) EnterSelect_stmt(ctx *Select_stmtContext) {} + +// ExitSelect_stmt is called when production select_stmt is exited. +func (s *BaseSQLiteParserListener) ExitSelect_stmt(ctx *Select_stmtContext) {} + +// EnterJoin_clause is called when production join_clause is entered. +func (s *BaseSQLiteParserListener) EnterJoin_clause(ctx *Join_clauseContext) {} + +// ExitJoin_clause is called when production join_clause is exited. +func (s *BaseSQLiteParserListener) ExitJoin_clause(ctx *Join_clauseContext) {} + +// EnterSelect_core is called when production select_core is entered. +func (s *BaseSQLiteParserListener) EnterSelect_core(ctx *Select_coreContext) {} + +// ExitSelect_core is called when production select_core is exited. +func (s *BaseSQLiteParserListener) ExitSelect_core(ctx *Select_coreContext) {} + +// EnterFactored_select_stmt is called when production factored_select_stmt is entered. +func (s *BaseSQLiteParserListener) EnterFactored_select_stmt(ctx *Factored_select_stmtContext) {} + +// ExitFactored_select_stmt is called when production factored_select_stmt is exited. +func (s *BaseSQLiteParserListener) ExitFactored_select_stmt(ctx *Factored_select_stmtContext) {} + +// EnterSimple_select_stmt is called when production simple_select_stmt is entered. +func (s *BaseSQLiteParserListener) EnterSimple_select_stmt(ctx *Simple_select_stmtContext) {} + +// ExitSimple_select_stmt is called when production simple_select_stmt is exited. +func (s *BaseSQLiteParserListener) ExitSimple_select_stmt(ctx *Simple_select_stmtContext) {} + +// EnterCompound_select_stmt is called when production compound_select_stmt is entered. +func (s *BaseSQLiteParserListener) EnterCompound_select_stmt(ctx *Compound_select_stmtContext) {} + +// ExitCompound_select_stmt is called when production compound_select_stmt is exited. 
+func (s *BaseSQLiteParserListener) ExitCompound_select_stmt(ctx *Compound_select_stmtContext) {} + +// EnterTable_or_subquery is called when production table_or_subquery is entered. +func (s *BaseSQLiteParserListener) EnterTable_or_subquery(ctx *Table_or_subqueryContext) {} + +// ExitTable_or_subquery is called when production table_or_subquery is exited. +func (s *BaseSQLiteParserListener) ExitTable_or_subquery(ctx *Table_or_subqueryContext) {} + +// EnterResult_column is called when production result_column is entered. +func (s *BaseSQLiteParserListener) EnterResult_column(ctx *Result_columnContext) {} + +// ExitResult_column is called when production result_column is exited. +func (s *BaseSQLiteParserListener) ExitResult_column(ctx *Result_columnContext) {} + +// EnterJoin_operator is called when production join_operator is entered. +func (s *BaseSQLiteParserListener) EnterJoin_operator(ctx *Join_operatorContext) {} + +// ExitJoin_operator is called when production join_operator is exited. +func (s *BaseSQLiteParserListener) ExitJoin_operator(ctx *Join_operatorContext) {} + +// EnterJoin_constraint is called when production join_constraint is entered. +func (s *BaseSQLiteParserListener) EnterJoin_constraint(ctx *Join_constraintContext) {} + +// ExitJoin_constraint is called when production join_constraint is exited. +func (s *BaseSQLiteParserListener) ExitJoin_constraint(ctx *Join_constraintContext) {} + +// EnterCompound_operator is called when production compound_operator is entered. +func (s *BaseSQLiteParserListener) EnterCompound_operator(ctx *Compound_operatorContext) {} + +// ExitCompound_operator is called when production compound_operator is exited. +func (s *BaseSQLiteParserListener) ExitCompound_operator(ctx *Compound_operatorContext) {} + +// EnterUpdate_stmt is called when production update_stmt is entered. +func (s *BaseSQLiteParserListener) EnterUpdate_stmt(ctx *Update_stmtContext) {} + +// ExitUpdate_stmt is called when production update_stmt is exited. +func (s *BaseSQLiteParserListener) ExitUpdate_stmt(ctx *Update_stmtContext) {} + +// EnterColumn_name_list is called when production column_name_list is entered. +func (s *BaseSQLiteParserListener) EnterColumn_name_list(ctx *Column_name_listContext) {} + +// ExitColumn_name_list is called when production column_name_list is exited. +func (s *BaseSQLiteParserListener) ExitColumn_name_list(ctx *Column_name_listContext) {} + +// EnterUpdate_stmt_limited is called when production update_stmt_limited is entered. +func (s *BaseSQLiteParserListener) EnterUpdate_stmt_limited(ctx *Update_stmt_limitedContext) {} + +// ExitUpdate_stmt_limited is called when production update_stmt_limited is exited. +func (s *BaseSQLiteParserListener) ExitUpdate_stmt_limited(ctx *Update_stmt_limitedContext) {} + +// EnterQualified_table_name is called when production qualified_table_name is entered. +func (s *BaseSQLiteParserListener) EnterQualified_table_name(ctx *Qualified_table_nameContext) {} + +// ExitQualified_table_name is called when production qualified_table_name is exited. +func (s *BaseSQLiteParserListener) ExitQualified_table_name(ctx *Qualified_table_nameContext) {} + +// EnterVacuum_stmt is called when production vacuum_stmt is entered. +func (s *BaseSQLiteParserListener) EnterVacuum_stmt(ctx *Vacuum_stmtContext) {} + +// ExitVacuum_stmt is called when production vacuum_stmt is exited. 
+func (s *BaseSQLiteParserListener) ExitVacuum_stmt(ctx *Vacuum_stmtContext) {} + +// EnterFilter_clause is called when production filter_clause is entered. +func (s *BaseSQLiteParserListener) EnterFilter_clause(ctx *Filter_clauseContext) {} + +// ExitFilter_clause is called when production filter_clause is exited. +func (s *BaseSQLiteParserListener) ExitFilter_clause(ctx *Filter_clauseContext) {} + +// EnterWindow_defn is called when production window_defn is entered. +func (s *BaseSQLiteParserListener) EnterWindow_defn(ctx *Window_defnContext) {} + +// ExitWindow_defn is called when production window_defn is exited. +func (s *BaseSQLiteParserListener) ExitWindow_defn(ctx *Window_defnContext) {} + +// EnterOver_clause is called when production over_clause is entered. +func (s *BaseSQLiteParserListener) EnterOver_clause(ctx *Over_clauseContext) {} + +// ExitOver_clause is called when production over_clause is exited. +func (s *BaseSQLiteParserListener) ExitOver_clause(ctx *Over_clauseContext) {} + +// EnterFrame_spec is called when production frame_spec is entered. +func (s *BaseSQLiteParserListener) EnterFrame_spec(ctx *Frame_specContext) {} + +// ExitFrame_spec is called when production frame_spec is exited. +func (s *BaseSQLiteParserListener) ExitFrame_spec(ctx *Frame_specContext) {} + +// EnterFrame_clause is called when production frame_clause is entered. +func (s *BaseSQLiteParserListener) EnterFrame_clause(ctx *Frame_clauseContext) {} + +// ExitFrame_clause is called when production frame_clause is exited. +func (s *BaseSQLiteParserListener) ExitFrame_clause(ctx *Frame_clauseContext) {} + +// EnterSimple_function_invocation is called when production simple_function_invocation is entered. +func (s *BaseSQLiteParserListener) EnterSimple_function_invocation(ctx *Simple_function_invocationContext) { +} + +// ExitSimple_function_invocation is called when production simple_function_invocation is exited. +func (s *BaseSQLiteParserListener) ExitSimple_function_invocation(ctx *Simple_function_invocationContext) { +} + +// EnterAggregate_function_invocation is called when production aggregate_function_invocation is entered. +func (s *BaseSQLiteParserListener) EnterAggregate_function_invocation(ctx *Aggregate_function_invocationContext) { +} + +// ExitAggregate_function_invocation is called when production aggregate_function_invocation is exited. +func (s *BaseSQLiteParserListener) ExitAggregate_function_invocation(ctx *Aggregate_function_invocationContext) { +} + +// EnterWindow_function_invocation is called when production window_function_invocation is entered. +func (s *BaseSQLiteParserListener) EnterWindow_function_invocation(ctx *Window_function_invocationContext) { +} + +// ExitWindow_function_invocation is called when production window_function_invocation is exited. +func (s *BaseSQLiteParserListener) ExitWindow_function_invocation(ctx *Window_function_invocationContext) { +} + +// EnterCommon_table_stmt is called when production common_table_stmt is entered. +func (s *BaseSQLiteParserListener) EnterCommon_table_stmt(ctx *Common_table_stmtContext) {} + +// ExitCommon_table_stmt is called when production common_table_stmt is exited. +func (s *BaseSQLiteParserListener) ExitCommon_table_stmt(ctx *Common_table_stmtContext) {} + +// EnterOrder_by_stmt is called when production order_by_stmt is entered. +func (s *BaseSQLiteParserListener) EnterOrder_by_stmt(ctx *Order_by_stmtContext) {} + +// ExitOrder_by_stmt is called when production order_by_stmt is exited. 
+func (s *BaseSQLiteParserListener) ExitOrder_by_stmt(ctx *Order_by_stmtContext) {} + +// EnterLimit_stmt is called when production limit_stmt is entered. +func (s *BaseSQLiteParserListener) EnterLimit_stmt(ctx *Limit_stmtContext) {} + +// ExitLimit_stmt is called when production limit_stmt is exited. +func (s *BaseSQLiteParserListener) ExitLimit_stmt(ctx *Limit_stmtContext) {} + +// EnterOrdering_term is called when production ordering_term is entered. +func (s *BaseSQLiteParserListener) EnterOrdering_term(ctx *Ordering_termContext) {} + +// ExitOrdering_term is called when production ordering_term is exited. +func (s *BaseSQLiteParserListener) ExitOrdering_term(ctx *Ordering_termContext) {} + +// EnterAsc_desc is called when production asc_desc is entered. +func (s *BaseSQLiteParserListener) EnterAsc_desc(ctx *Asc_descContext) {} + +// ExitAsc_desc is called when production asc_desc is exited. +func (s *BaseSQLiteParserListener) ExitAsc_desc(ctx *Asc_descContext) {} + +// EnterFrame_left is called when production frame_left is entered. +func (s *BaseSQLiteParserListener) EnterFrame_left(ctx *Frame_leftContext) {} + +// ExitFrame_left is called when production frame_left is exited. +func (s *BaseSQLiteParserListener) ExitFrame_left(ctx *Frame_leftContext) {} + +// EnterFrame_right is called when production frame_right is entered. +func (s *BaseSQLiteParserListener) EnterFrame_right(ctx *Frame_rightContext) {} + +// ExitFrame_right is called when production frame_right is exited. +func (s *BaseSQLiteParserListener) ExitFrame_right(ctx *Frame_rightContext) {} + +// EnterFrame_single is called when production frame_single is entered. +func (s *BaseSQLiteParserListener) EnterFrame_single(ctx *Frame_singleContext) {} + +// ExitFrame_single is called when production frame_single is exited. +func (s *BaseSQLiteParserListener) ExitFrame_single(ctx *Frame_singleContext) {} + +// EnterWindow_function is called when production window_function is entered. +func (s *BaseSQLiteParserListener) EnterWindow_function(ctx *Window_functionContext) {} + +// ExitWindow_function is called when production window_function is exited. +func (s *BaseSQLiteParserListener) ExitWindow_function(ctx *Window_functionContext) {} + +// EnterOffset is called when production offset is entered. +func (s *BaseSQLiteParserListener) EnterOffset(ctx *OffsetContext) {} + +// ExitOffset is called when production offset is exited. +func (s *BaseSQLiteParserListener) ExitOffset(ctx *OffsetContext) {} + +// EnterDefault_value is called when production default_value is entered. +func (s *BaseSQLiteParserListener) EnterDefault_value(ctx *Default_valueContext) {} + +// ExitDefault_value is called when production default_value is exited. +func (s *BaseSQLiteParserListener) ExitDefault_value(ctx *Default_valueContext) {} + +// EnterPartition_by is called when production partition_by is entered. +func (s *BaseSQLiteParserListener) EnterPartition_by(ctx *Partition_byContext) {} + +// ExitPartition_by is called when production partition_by is exited. +func (s *BaseSQLiteParserListener) ExitPartition_by(ctx *Partition_byContext) {} + +// EnterOrder_by_expr is called when production order_by_expr is entered. +func (s *BaseSQLiteParserListener) EnterOrder_by_expr(ctx *Order_by_exprContext) {} + +// ExitOrder_by_expr is called when production order_by_expr is exited. +func (s *BaseSQLiteParserListener) ExitOrder_by_expr(ctx *Order_by_exprContext) {} + +// EnterOrder_by_expr_asc_desc is called when production order_by_expr_asc_desc is entered. 
+func (s *BaseSQLiteParserListener) EnterOrder_by_expr_asc_desc(ctx *Order_by_expr_asc_descContext) {} + +// ExitOrder_by_expr_asc_desc is called when production order_by_expr_asc_desc is exited. +func (s *BaseSQLiteParserListener) ExitOrder_by_expr_asc_desc(ctx *Order_by_expr_asc_descContext) {} + +// EnterExpr_asc_desc is called when production expr_asc_desc is entered. +func (s *BaseSQLiteParserListener) EnterExpr_asc_desc(ctx *Expr_asc_descContext) {} + +// ExitExpr_asc_desc is called when production expr_asc_desc is exited. +func (s *BaseSQLiteParserListener) ExitExpr_asc_desc(ctx *Expr_asc_descContext) {} + +// EnterInitial_select is called when production initial_select is entered. +func (s *BaseSQLiteParserListener) EnterInitial_select(ctx *Initial_selectContext) {} + +// ExitInitial_select is called when production initial_select is exited. +func (s *BaseSQLiteParserListener) ExitInitial_select(ctx *Initial_selectContext) {} + +// EnterRecursive_select is called when production recursive_select is entered. +func (s *BaseSQLiteParserListener) EnterRecursive_select(ctx *Recursive_selectContext) {} + +// ExitRecursive_select is called when production recursive_select is exited. +func (s *BaseSQLiteParserListener) ExitRecursive_select(ctx *Recursive_selectContext) {} + +// EnterUnary_operator is called when production unary_operator is entered. +func (s *BaseSQLiteParserListener) EnterUnary_operator(ctx *Unary_operatorContext) {} + +// ExitUnary_operator is called when production unary_operator is exited. +func (s *BaseSQLiteParserListener) ExitUnary_operator(ctx *Unary_operatorContext) {} + +// EnterError_message is called when production error_message is entered. +func (s *BaseSQLiteParserListener) EnterError_message(ctx *Error_messageContext) {} + +// ExitError_message is called when production error_message is exited. +func (s *BaseSQLiteParserListener) ExitError_message(ctx *Error_messageContext) {} + +// EnterModule_argument is called when production module_argument is entered. +func (s *BaseSQLiteParserListener) EnterModule_argument(ctx *Module_argumentContext) {} + +// ExitModule_argument is called when production module_argument is exited. +func (s *BaseSQLiteParserListener) ExitModule_argument(ctx *Module_argumentContext) {} + +// EnterColumn_alias is called when production column_alias is entered. +func (s *BaseSQLiteParserListener) EnterColumn_alias(ctx *Column_aliasContext) {} + +// ExitColumn_alias is called when production column_alias is exited. +func (s *BaseSQLiteParserListener) ExitColumn_alias(ctx *Column_aliasContext) {} + +// EnterKeyword is called when production keyword is entered. +func (s *BaseSQLiteParserListener) EnterKeyword(ctx *KeywordContext) {} + +// ExitKeyword is called when production keyword is exited. +func (s *BaseSQLiteParserListener) ExitKeyword(ctx *KeywordContext) {} + +// EnterName is called when production name is entered. +func (s *BaseSQLiteParserListener) EnterName(ctx *NameContext) {} + +// ExitName is called when production name is exited. +func (s *BaseSQLiteParserListener) ExitName(ctx *NameContext) {} + +// EnterFunction_name is called when production function_name is entered. +func (s *BaseSQLiteParserListener) EnterFunction_name(ctx *Function_nameContext) {} + +// ExitFunction_name is called when production function_name is exited. +func (s *BaseSQLiteParserListener) ExitFunction_name(ctx *Function_nameContext) {} + +// EnterSchema_name is called when production schema_name is entered. 
+func (s *BaseSQLiteParserListener) EnterSchema_name(ctx *Schema_nameContext) {} + +// ExitSchema_name is called when production schema_name is exited. +func (s *BaseSQLiteParserListener) ExitSchema_name(ctx *Schema_nameContext) {} + +// EnterTable_name is called when production table_name is entered. +func (s *BaseSQLiteParserListener) EnterTable_name(ctx *Table_nameContext) {} + +// ExitTable_name is called when production table_name is exited. +func (s *BaseSQLiteParserListener) ExitTable_name(ctx *Table_nameContext) {} + +// EnterTable_or_index_name is called when production table_or_index_name is entered. +func (s *BaseSQLiteParserListener) EnterTable_or_index_name(ctx *Table_or_index_nameContext) {} + +// ExitTable_or_index_name is called when production table_or_index_name is exited. +func (s *BaseSQLiteParserListener) ExitTable_or_index_name(ctx *Table_or_index_nameContext) {} + +// EnterColumn_name is called when production column_name is entered. +func (s *BaseSQLiteParserListener) EnterColumn_name(ctx *Column_nameContext) {} + +// ExitColumn_name is called when production column_name is exited. +func (s *BaseSQLiteParserListener) ExitColumn_name(ctx *Column_nameContext) {} + +// EnterCollation_name is called when production collation_name is entered. +func (s *BaseSQLiteParserListener) EnterCollation_name(ctx *Collation_nameContext) {} + +// ExitCollation_name is called when production collation_name is exited. +func (s *BaseSQLiteParserListener) ExitCollation_name(ctx *Collation_nameContext) {} + +// EnterForeign_table is called when production foreign_table is entered. +func (s *BaseSQLiteParserListener) EnterForeign_table(ctx *Foreign_tableContext) {} + +// ExitForeign_table is called when production foreign_table is exited. +func (s *BaseSQLiteParserListener) ExitForeign_table(ctx *Foreign_tableContext) {} + +// EnterIndex_name is called when production index_name is entered. +func (s *BaseSQLiteParserListener) EnterIndex_name(ctx *Index_nameContext) {} + +// ExitIndex_name is called when production index_name is exited. +func (s *BaseSQLiteParserListener) ExitIndex_name(ctx *Index_nameContext) {} + +// EnterTrigger_name is called when production trigger_name is entered. +func (s *BaseSQLiteParserListener) EnterTrigger_name(ctx *Trigger_nameContext) {} + +// ExitTrigger_name is called when production trigger_name is exited. +func (s *BaseSQLiteParserListener) ExitTrigger_name(ctx *Trigger_nameContext) {} + +// EnterView_name is called when production view_name is entered. +func (s *BaseSQLiteParserListener) EnterView_name(ctx *View_nameContext) {} + +// ExitView_name is called when production view_name is exited. +func (s *BaseSQLiteParserListener) ExitView_name(ctx *View_nameContext) {} + +// EnterModule_name is called when production module_name is entered. +func (s *BaseSQLiteParserListener) EnterModule_name(ctx *Module_nameContext) {} + +// ExitModule_name is called when production module_name is exited. +func (s *BaseSQLiteParserListener) ExitModule_name(ctx *Module_nameContext) {} + +// EnterPragma_name is called when production pragma_name is entered. +func (s *BaseSQLiteParserListener) EnterPragma_name(ctx *Pragma_nameContext) {} + +// ExitPragma_name is called when production pragma_name is exited. +func (s *BaseSQLiteParserListener) ExitPragma_name(ctx *Pragma_nameContext) {} + +// EnterSavepoint_name is called when production savepoint_name is entered. 
+func (s *BaseSQLiteParserListener) EnterSavepoint_name(ctx *Savepoint_nameContext) {} + +// ExitSavepoint_name is called when production savepoint_name is exited. +func (s *BaseSQLiteParserListener) ExitSavepoint_name(ctx *Savepoint_nameContext) {} + +// EnterTable_alias is called when production table_alias is entered. +func (s *BaseSQLiteParserListener) EnterTable_alias(ctx *Table_aliasContext) {} + +// ExitTable_alias is called when production table_alias is exited. +func (s *BaseSQLiteParserListener) ExitTable_alias(ctx *Table_aliasContext) {} + +// EnterTransaction_name is called when production transaction_name is entered. +func (s *BaseSQLiteParserListener) EnterTransaction_name(ctx *Transaction_nameContext) {} + +// ExitTransaction_name is called when production transaction_name is exited. +func (s *BaseSQLiteParserListener) ExitTransaction_name(ctx *Transaction_nameContext) {} + +// EnterWindow_name is called when production window_name is entered. +func (s *BaseSQLiteParserListener) EnterWindow_name(ctx *Window_nameContext) {} + +// ExitWindow_name is called when production window_name is exited. +func (s *BaseSQLiteParserListener) ExitWindow_name(ctx *Window_nameContext) {} + +// EnterAlias is called when production alias is entered. +func (s *BaseSQLiteParserListener) EnterAlias(ctx *AliasContext) {} + +// ExitAlias is called when production alias is exited. +func (s *BaseSQLiteParserListener) ExitAlias(ctx *AliasContext) {} + +// EnterFilename is called when production filename is entered. +func (s *BaseSQLiteParserListener) EnterFilename(ctx *FilenameContext) {} + +// ExitFilename is called when production filename is exited. +func (s *BaseSQLiteParserListener) ExitFilename(ctx *FilenameContext) {} + +// EnterBase_window_name is called when production base_window_name is entered. +func (s *BaseSQLiteParserListener) EnterBase_window_name(ctx *Base_window_nameContext) {} + +// ExitBase_window_name is called when production base_window_name is exited. +func (s *BaseSQLiteParserListener) ExitBase_window_name(ctx *Base_window_nameContext) {} + +// EnterSimple_func is called when production simple_func is entered. +func (s *BaseSQLiteParserListener) EnterSimple_func(ctx *Simple_funcContext) {} + +// ExitSimple_func is called when production simple_func is exited. +func (s *BaseSQLiteParserListener) ExitSimple_func(ctx *Simple_funcContext) {} + +// EnterAggregate_func is called when production aggregate_func is entered. +func (s *BaseSQLiteParserListener) EnterAggregate_func(ctx *Aggregate_funcContext) {} + +// ExitAggregate_func is called when production aggregate_func is exited. +func (s *BaseSQLiteParserListener) ExitAggregate_func(ctx *Aggregate_funcContext) {} + +// EnterTable_function_name is called when production table_function_name is entered. +func (s *BaseSQLiteParserListener) EnterTable_function_name(ctx *Table_function_nameContext) {} + +// ExitTable_function_name is called when production table_function_name is exited. +func (s *BaseSQLiteParserListener) ExitTable_function_name(ctx *Table_function_nameContext) {} + +// EnterAny_name is called when production any_name is entered. +func (s *BaseSQLiteParserListener) EnterAny_name(ctx *Any_nameContext) {} + +// ExitAny_name is called when production any_name is exited. 
+func (s *BaseSQLiteParserListener) ExitAny_name(ctx *Any_nameContext) {} diff --git a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqliteparser_listener.go b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqliteparser_listener.go index 0d13850c8b..3df12447e6 100644 --- a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqliteparser_listener.go +++ b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparser/sqliteparser_listener.go @@ -1,687 +1,687 @@ -// Code generated from SQLiteParser.g4 by ANTLR 4.12.0. DO NOT EDIT. - -package sqliteparser // SQLiteParser -import "github.com/antlr/antlr4/runtime/Go/antlr/v4" - -// SQLiteParserListener is a complete listener for a parse tree produced by SQLiteParser. -type SQLiteParserListener interface { - antlr.ParseTreeListener - - // EnterParse is called when entering the parse production. - EnterParse(c *ParseContext) - - // EnterSql_stmt_list is called when entering the sql_stmt_list production. - EnterSql_stmt_list(c *Sql_stmt_listContext) - - // EnterSql_stmt is called when entering the sql_stmt production. - EnterSql_stmt(c *Sql_stmtContext) - - // EnterAlter_table_stmt is called when entering the alter_table_stmt production. - EnterAlter_table_stmt(c *Alter_table_stmtContext) - - // EnterAnalyze_stmt is called when entering the analyze_stmt production. - EnterAnalyze_stmt(c *Analyze_stmtContext) - - // EnterAttach_stmt is called when entering the attach_stmt production. - EnterAttach_stmt(c *Attach_stmtContext) - - // EnterBegin_stmt is called when entering the begin_stmt production. - EnterBegin_stmt(c *Begin_stmtContext) - - // EnterCommit_stmt is called when entering the commit_stmt production. - EnterCommit_stmt(c *Commit_stmtContext) - - // EnterRollback_stmt is called when entering the rollback_stmt production. - EnterRollback_stmt(c *Rollback_stmtContext) - - // EnterSavepoint_stmt is called when entering the savepoint_stmt production. - EnterSavepoint_stmt(c *Savepoint_stmtContext) - - // EnterRelease_stmt is called when entering the release_stmt production. - EnterRelease_stmt(c *Release_stmtContext) - - // EnterCreate_index_stmt is called when entering the create_index_stmt production. - EnterCreate_index_stmt(c *Create_index_stmtContext) - - // EnterIndexed_column is called when entering the indexed_column production. - EnterIndexed_column(c *Indexed_columnContext) - - // EnterCreate_table_stmt is called when entering the create_table_stmt production. - EnterCreate_table_stmt(c *Create_table_stmtContext) - - // EnterColumn_def is called when entering the column_def production. - EnterColumn_def(c *Column_defContext) - - // EnterType_name is called when entering the type_name production. - EnterType_name(c *Type_nameContext) - - // EnterColumn_constraint is called when entering the column_constraint production. - EnterColumn_constraint(c *Column_constraintContext) - - // EnterSigned_number is called when entering the signed_number production. - EnterSigned_number(c *Signed_numberContext) - - // EnterTable_constraint is called when entering the table_constraint production. - EnterTable_constraint(c *Table_constraintContext) - - // EnterForeign_key_clause is called when entering the foreign_key_clause production. - EnterForeign_key_clause(c *Foreign_key_clauseContext) - - // EnterConflict_clause is called when entering the conflict_clause production. - EnterConflict_clause(c *Conflict_clauseContext) - - // EnterCreate_trigger_stmt is called when entering the create_trigger_stmt production. 
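The two vendored files in this part of the diff are ANTLR-generated and marked `DO NOT EDIT`; every line is removed and re-added verbatim, so the change appears to only normalize line endings. For orientation, `BaseSQLiteParserListener` is the standard ANTLR "base listener": a struct of empty callbacks that satisfies the `SQLiteParserListener` interface, letting a consumer embed it and override only the productions it cares about. Below is a minimal usage sketch, not part of this diff, assuming the conventional generated constructors `NewSQLiteLexer` and `NewSQLiteParser` (neither is shown here):

```go
package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr/v4"
	"github.com/libsql/sqlite-antlr4-parser/sqliteparser"
)

// tableCollector embeds the generated base listener, so every callback
// is a no-op except the one overridden below.
type tableCollector struct {
	sqliteparser.BaseSQLiteParserListener
	tables []string
}

// EnterTable_name fires once for each table_name production in the tree.
func (c *tableCollector) EnterTable_name(ctx *sqliteparser.Table_nameContext) {
	c.tables = append(c.tables, ctx.GetText())
}

func main() {
	input := antlr.NewInputStream("SELECT id FROM users;")
	lexer := sqliteparser.NewSQLiteLexer(input) // assumed generated lexer constructor
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	parser := sqliteparser.NewSQLiteParser(tokens) // assumed generated parser constructor

	var c tableCollector
	// Walk the tree from the grammar's entry production (parse).
	antlr.ParseTreeWalkerDefault.Walk(&c, parser.Parse())
	fmt.Println(c.tables) // e.g. [users]
}
```

The walker dispatches through the `SQLiteParserListener` interface, so the outer struct's `EnterTable_name` shadows the embedded no-op while all other callbacks fall through to the base type.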
- EnterCreate_trigger_stmt(c *Create_trigger_stmtContext) - - // EnterCreate_view_stmt is called when entering the create_view_stmt production. - EnterCreate_view_stmt(c *Create_view_stmtContext) - - // EnterCreate_virtual_table_stmt is called when entering the create_virtual_table_stmt production. - EnterCreate_virtual_table_stmt(c *Create_virtual_table_stmtContext) - - // EnterWith_clause is called when entering the with_clause production. - EnterWith_clause(c *With_clauseContext) - - // EnterCte_table_name is called when entering the cte_table_name production. - EnterCte_table_name(c *Cte_table_nameContext) - - // EnterRecursive_cte is called when entering the recursive_cte production. - EnterRecursive_cte(c *Recursive_cteContext) - - // EnterCommon_table_expression is called when entering the common_table_expression production. - EnterCommon_table_expression(c *Common_table_expressionContext) - - // EnterDelete_stmt is called when entering the delete_stmt production. - EnterDelete_stmt(c *Delete_stmtContext) - - // EnterDelete_stmt_limited is called when entering the delete_stmt_limited production. - EnterDelete_stmt_limited(c *Delete_stmt_limitedContext) - - // EnterDetach_stmt is called when entering the detach_stmt production. - EnterDetach_stmt(c *Detach_stmtContext) - - // EnterDrop_stmt is called when entering the drop_stmt production. - EnterDrop_stmt(c *Drop_stmtContext) - - // EnterExpr is called when entering the expr production. - EnterExpr(c *ExprContext) - - // EnterRaise_function is called when entering the raise_function production. - EnterRaise_function(c *Raise_functionContext) - - // EnterLiteral_value is called when entering the literal_value production. - EnterLiteral_value(c *Literal_valueContext) - - // EnterValue_row is called when entering the value_row production. - EnterValue_row(c *Value_rowContext) - - // EnterValues_clause is called when entering the values_clause production. - EnterValues_clause(c *Values_clauseContext) - - // EnterInsert_stmt is called when entering the insert_stmt production. - EnterInsert_stmt(c *Insert_stmtContext) - - // EnterReturning_clause is called when entering the returning_clause production. - EnterReturning_clause(c *Returning_clauseContext) - - // EnterUpsert_clause is called when entering the upsert_clause production. - EnterUpsert_clause(c *Upsert_clauseContext) - - // EnterPragma_stmt is called when entering the pragma_stmt production. - EnterPragma_stmt(c *Pragma_stmtContext) - - // EnterPragma_value is called when entering the pragma_value production. - EnterPragma_value(c *Pragma_valueContext) - - // EnterReindex_stmt is called when entering the reindex_stmt production. - EnterReindex_stmt(c *Reindex_stmtContext) - - // EnterSelect_stmt is called when entering the select_stmt production. - EnterSelect_stmt(c *Select_stmtContext) - - // EnterJoin_clause is called when entering the join_clause production. - EnterJoin_clause(c *Join_clauseContext) - - // EnterSelect_core is called when entering the select_core production. - EnterSelect_core(c *Select_coreContext) - - // EnterFactored_select_stmt is called when entering the factored_select_stmt production. - EnterFactored_select_stmt(c *Factored_select_stmtContext) - - // EnterSimple_select_stmt is called when entering the simple_select_stmt production. - EnterSimple_select_stmt(c *Simple_select_stmtContext) - - // EnterCompound_select_stmt is called when entering the compound_select_stmt production. 
- EnterCompound_select_stmt(c *Compound_select_stmtContext) - - // EnterTable_or_subquery is called when entering the table_or_subquery production. - EnterTable_or_subquery(c *Table_or_subqueryContext) - - // EnterResult_column is called when entering the result_column production. - EnterResult_column(c *Result_columnContext) - - // EnterJoin_operator is called when entering the join_operator production. - EnterJoin_operator(c *Join_operatorContext) - - // EnterJoin_constraint is called when entering the join_constraint production. - EnterJoin_constraint(c *Join_constraintContext) - - // EnterCompound_operator is called when entering the compound_operator production. - EnterCompound_operator(c *Compound_operatorContext) - - // EnterUpdate_stmt is called when entering the update_stmt production. - EnterUpdate_stmt(c *Update_stmtContext) - - // EnterColumn_name_list is called when entering the column_name_list production. - EnterColumn_name_list(c *Column_name_listContext) - - // EnterUpdate_stmt_limited is called when entering the update_stmt_limited production. - EnterUpdate_stmt_limited(c *Update_stmt_limitedContext) - - // EnterQualified_table_name is called when entering the qualified_table_name production. - EnterQualified_table_name(c *Qualified_table_nameContext) - - // EnterVacuum_stmt is called when entering the vacuum_stmt production. - EnterVacuum_stmt(c *Vacuum_stmtContext) - - // EnterFilter_clause is called when entering the filter_clause production. - EnterFilter_clause(c *Filter_clauseContext) - - // EnterWindow_defn is called when entering the window_defn production. - EnterWindow_defn(c *Window_defnContext) - - // EnterOver_clause is called when entering the over_clause production. - EnterOver_clause(c *Over_clauseContext) - - // EnterFrame_spec is called when entering the frame_spec production. - EnterFrame_spec(c *Frame_specContext) - - // EnterFrame_clause is called when entering the frame_clause production. - EnterFrame_clause(c *Frame_clauseContext) - - // EnterSimple_function_invocation is called when entering the simple_function_invocation production. - EnterSimple_function_invocation(c *Simple_function_invocationContext) - - // EnterAggregate_function_invocation is called when entering the aggregate_function_invocation production. - EnterAggregate_function_invocation(c *Aggregate_function_invocationContext) - - // EnterWindow_function_invocation is called when entering the window_function_invocation production. - EnterWindow_function_invocation(c *Window_function_invocationContext) - - // EnterCommon_table_stmt is called when entering the common_table_stmt production. - EnterCommon_table_stmt(c *Common_table_stmtContext) - - // EnterOrder_by_stmt is called when entering the order_by_stmt production. - EnterOrder_by_stmt(c *Order_by_stmtContext) - - // EnterLimit_stmt is called when entering the limit_stmt production. - EnterLimit_stmt(c *Limit_stmtContext) - - // EnterOrdering_term is called when entering the ordering_term production. - EnterOrdering_term(c *Ordering_termContext) - - // EnterAsc_desc is called when entering the asc_desc production. - EnterAsc_desc(c *Asc_descContext) - - // EnterFrame_left is called when entering the frame_left production. - EnterFrame_left(c *Frame_leftContext) - - // EnterFrame_right is called when entering the frame_right production. - EnterFrame_right(c *Frame_rightContext) - - // EnterFrame_single is called when entering the frame_single production. 
- EnterFrame_single(c *Frame_singleContext) - - // EnterWindow_function is called when entering the window_function production. - EnterWindow_function(c *Window_functionContext) - - // EnterOffset is called when entering the offset production. - EnterOffset(c *OffsetContext) - - // EnterDefault_value is called when entering the default_value production. - EnterDefault_value(c *Default_valueContext) - - // EnterPartition_by is called when entering the partition_by production. - EnterPartition_by(c *Partition_byContext) - - // EnterOrder_by_expr is called when entering the order_by_expr production. - EnterOrder_by_expr(c *Order_by_exprContext) - - // EnterOrder_by_expr_asc_desc is called when entering the order_by_expr_asc_desc production. - EnterOrder_by_expr_asc_desc(c *Order_by_expr_asc_descContext) - - // EnterExpr_asc_desc is called when entering the expr_asc_desc production. - EnterExpr_asc_desc(c *Expr_asc_descContext) - - // EnterInitial_select is called when entering the initial_select production. - EnterInitial_select(c *Initial_selectContext) - - // EnterRecursive_select is called when entering the recursive_select production. - EnterRecursive_select(c *Recursive_selectContext) - - // EnterUnary_operator is called when entering the unary_operator production. - EnterUnary_operator(c *Unary_operatorContext) - - // EnterError_message is called when entering the error_message production. - EnterError_message(c *Error_messageContext) - - // EnterModule_argument is called when entering the module_argument production. - EnterModule_argument(c *Module_argumentContext) - - // EnterColumn_alias is called when entering the column_alias production. - EnterColumn_alias(c *Column_aliasContext) - - // EnterKeyword is called when entering the keyword production. - EnterKeyword(c *KeywordContext) - - // EnterName is called when entering the name production. - EnterName(c *NameContext) - - // EnterFunction_name is called when entering the function_name production. - EnterFunction_name(c *Function_nameContext) - - // EnterSchema_name is called when entering the schema_name production. - EnterSchema_name(c *Schema_nameContext) - - // EnterTable_name is called when entering the table_name production. - EnterTable_name(c *Table_nameContext) - - // EnterTable_or_index_name is called when entering the table_or_index_name production. - EnterTable_or_index_name(c *Table_or_index_nameContext) - - // EnterColumn_name is called when entering the column_name production. - EnterColumn_name(c *Column_nameContext) - - // EnterCollation_name is called when entering the collation_name production. - EnterCollation_name(c *Collation_nameContext) - - // EnterForeign_table is called when entering the foreign_table production. - EnterForeign_table(c *Foreign_tableContext) - - // EnterIndex_name is called when entering the index_name production. - EnterIndex_name(c *Index_nameContext) - - // EnterTrigger_name is called when entering the trigger_name production. - EnterTrigger_name(c *Trigger_nameContext) - - // EnterView_name is called when entering the view_name production. - EnterView_name(c *View_nameContext) - - // EnterModule_name is called when entering the module_name production. - EnterModule_name(c *Module_nameContext) - - // EnterPragma_name is called when entering the pragma_name production. - EnterPragma_name(c *Pragma_nameContext) - - // EnterSavepoint_name is called when entering the savepoint_name production. 
- EnterSavepoint_name(c *Savepoint_nameContext) - - // EnterTable_alias is called when entering the table_alias production. - EnterTable_alias(c *Table_aliasContext) - - // EnterTransaction_name is called when entering the transaction_name production. - EnterTransaction_name(c *Transaction_nameContext) - - // EnterWindow_name is called when entering the window_name production. - EnterWindow_name(c *Window_nameContext) - - // EnterAlias is called when entering the alias production. - EnterAlias(c *AliasContext) - - // EnterFilename is called when entering the filename production. - EnterFilename(c *FilenameContext) - - // EnterBase_window_name is called when entering the base_window_name production. - EnterBase_window_name(c *Base_window_nameContext) - - // EnterSimple_func is called when entering the simple_func production. - EnterSimple_func(c *Simple_funcContext) - - // EnterAggregate_func is called when entering the aggregate_func production. - EnterAggregate_func(c *Aggregate_funcContext) - - // EnterTable_function_name is called when entering the table_function_name production. - EnterTable_function_name(c *Table_function_nameContext) - - // EnterAny_name is called when entering the any_name production. - EnterAny_name(c *Any_nameContext) - - // ExitParse is called when exiting the parse production. - ExitParse(c *ParseContext) - - // ExitSql_stmt_list is called when exiting the sql_stmt_list production. - ExitSql_stmt_list(c *Sql_stmt_listContext) - - // ExitSql_stmt is called when exiting the sql_stmt production. - ExitSql_stmt(c *Sql_stmtContext) - - // ExitAlter_table_stmt is called when exiting the alter_table_stmt production. - ExitAlter_table_stmt(c *Alter_table_stmtContext) - - // ExitAnalyze_stmt is called when exiting the analyze_stmt production. - ExitAnalyze_stmt(c *Analyze_stmtContext) - - // ExitAttach_stmt is called when exiting the attach_stmt production. - ExitAttach_stmt(c *Attach_stmtContext) - - // ExitBegin_stmt is called when exiting the begin_stmt production. - ExitBegin_stmt(c *Begin_stmtContext) - - // ExitCommit_stmt is called when exiting the commit_stmt production. - ExitCommit_stmt(c *Commit_stmtContext) - - // ExitRollback_stmt is called when exiting the rollback_stmt production. - ExitRollback_stmt(c *Rollback_stmtContext) - - // ExitSavepoint_stmt is called when exiting the savepoint_stmt production. - ExitSavepoint_stmt(c *Savepoint_stmtContext) - - // ExitRelease_stmt is called when exiting the release_stmt production. - ExitRelease_stmt(c *Release_stmtContext) - - // ExitCreate_index_stmt is called when exiting the create_index_stmt production. - ExitCreate_index_stmt(c *Create_index_stmtContext) - - // ExitIndexed_column is called when exiting the indexed_column production. - ExitIndexed_column(c *Indexed_columnContext) - - // ExitCreate_table_stmt is called when exiting the create_table_stmt production. - ExitCreate_table_stmt(c *Create_table_stmtContext) - - // ExitColumn_def is called when exiting the column_def production. - ExitColumn_def(c *Column_defContext) - - // ExitType_name is called when exiting the type_name production. - ExitType_name(c *Type_nameContext) - - // ExitColumn_constraint is called when exiting the column_constraint production. - ExitColumn_constraint(c *Column_constraintContext) - - // ExitSigned_number is called when exiting the signed_number production. - ExitSigned_number(c *Signed_numberContext) - - // ExitTable_constraint is called when exiting the table_constraint production. 
- ExitTable_constraint(c *Table_constraintContext) - - // ExitForeign_key_clause is called when exiting the foreign_key_clause production. - ExitForeign_key_clause(c *Foreign_key_clauseContext) - - // ExitConflict_clause is called when exiting the conflict_clause production. - ExitConflict_clause(c *Conflict_clauseContext) - - // ExitCreate_trigger_stmt is called when exiting the create_trigger_stmt production. - ExitCreate_trigger_stmt(c *Create_trigger_stmtContext) - - // ExitCreate_view_stmt is called when exiting the create_view_stmt production. - ExitCreate_view_stmt(c *Create_view_stmtContext) - - // ExitCreate_virtual_table_stmt is called when exiting the create_virtual_table_stmt production. - ExitCreate_virtual_table_stmt(c *Create_virtual_table_stmtContext) - - // ExitWith_clause is called when exiting the with_clause production. - ExitWith_clause(c *With_clauseContext) - - // ExitCte_table_name is called when exiting the cte_table_name production. - ExitCte_table_name(c *Cte_table_nameContext) - - // ExitRecursive_cte is called when exiting the recursive_cte production. - ExitRecursive_cte(c *Recursive_cteContext) - - // ExitCommon_table_expression is called when exiting the common_table_expression production. - ExitCommon_table_expression(c *Common_table_expressionContext) - - // ExitDelete_stmt is called when exiting the delete_stmt production. - ExitDelete_stmt(c *Delete_stmtContext) - - // ExitDelete_stmt_limited is called when exiting the delete_stmt_limited production. - ExitDelete_stmt_limited(c *Delete_stmt_limitedContext) - - // ExitDetach_stmt is called when exiting the detach_stmt production. - ExitDetach_stmt(c *Detach_stmtContext) - - // ExitDrop_stmt is called when exiting the drop_stmt production. - ExitDrop_stmt(c *Drop_stmtContext) - - // ExitExpr is called when exiting the expr production. - ExitExpr(c *ExprContext) - - // ExitRaise_function is called when exiting the raise_function production. - ExitRaise_function(c *Raise_functionContext) - - // ExitLiteral_value is called when exiting the literal_value production. - ExitLiteral_value(c *Literal_valueContext) - - // ExitValue_row is called when exiting the value_row production. - ExitValue_row(c *Value_rowContext) - - // ExitValues_clause is called when exiting the values_clause production. - ExitValues_clause(c *Values_clauseContext) - - // ExitInsert_stmt is called when exiting the insert_stmt production. - ExitInsert_stmt(c *Insert_stmtContext) - - // ExitReturning_clause is called when exiting the returning_clause production. - ExitReturning_clause(c *Returning_clauseContext) - - // ExitUpsert_clause is called when exiting the upsert_clause production. - ExitUpsert_clause(c *Upsert_clauseContext) - - // ExitPragma_stmt is called when exiting the pragma_stmt production. - ExitPragma_stmt(c *Pragma_stmtContext) - - // ExitPragma_value is called when exiting the pragma_value production. - ExitPragma_value(c *Pragma_valueContext) - - // ExitReindex_stmt is called when exiting the reindex_stmt production. - ExitReindex_stmt(c *Reindex_stmtContext) - - // ExitSelect_stmt is called when exiting the select_stmt production. - ExitSelect_stmt(c *Select_stmtContext) - - // ExitJoin_clause is called when exiting the join_clause production. - ExitJoin_clause(c *Join_clauseContext) - - // ExitSelect_core is called when exiting the select_core production. - ExitSelect_core(c *Select_coreContext) - - // ExitFactored_select_stmt is called when exiting the factored_select_stmt production. 
- ExitFactored_select_stmt(c *Factored_select_stmtContext) - - // ExitSimple_select_stmt is called when exiting the simple_select_stmt production. - ExitSimple_select_stmt(c *Simple_select_stmtContext) - - // ExitCompound_select_stmt is called when exiting the compound_select_stmt production. - ExitCompound_select_stmt(c *Compound_select_stmtContext) - - // ExitTable_or_subquery is called when exiting the table_or_subquery production. - ExitTable_or_subquery(c *Table_or_subqueryContext) - - // ExitResult_column is called when exiting the result_column production. - ExitResult_column(c *Result_columnContext) - - // ExitJoin_operator is called when exiting the join_operator production. - ExitJoin_operator(c *Join_operatorContext) - - // ExitJoin_constraint is called when exiting the join_constraint production. - ExitJoin_constraint(c *Join_constraintContext) - - // ExitCompound_operator is called when exiting the compound_operator production. - ExitCompound_operator(c *Compound_operatorContext) - - // ExitUpdate_stmt is called when exiting the update_stmt production. - ExitUpdate_stmt(c *Update_stmtContext) - - // ExitColumn_name_list is called when exiting the column_name_list production. - ExitColumn_name_list(c *Column_name_listContext) - - // ExitUpdate_stmt_limited is called when exiting the update_stmt_limited production. - ExitUpdate_stmt_limited(c *Update_stmt_limitedContext) - - // ExitQualified_table_name is called when exiting the qualified_table_name production. - ExitQualified_table_name(c *Qualified_table_nameContext) - - // ExitVacuum_stmt is called when exiting the vacuum_stmt production. - ExitVacuum_stmt(c *Vacuum_stmtContext) - - // ExitFilter_clause is called when exiting the filter_clause production. - ExitFilter_clause(c *Filter_clauseContext) - - // ExitWindow_defn is called when exiting the window_defn production. - ExitWindow_defn(c *Window_defnContext) - - // ExitOver_clause is called when exiting the over_clause production. - ExitOver_clause(c *Over_clauseContext) - - // ExitFrame_spec is called when exiting the frame_spec production. - ExitFrame_spec(c *Frame_specContext) - - // ExitFrame_clause is called when exiting the frame_clause production. - ExitFrame_clause(c *Frame_clauseContext) - - // ExitSimple_function_invocation is called when exiting the simple_function_invocation production. - ExitSimple_function_invocation(c *Simple_function_invocationContext) - - // ExitAggregate_function_invocation is called when exiting the aggregate_function_invocation production. - ExitAggregate_function_invocation(c *Aggregate_function_invocationContext) - - // ExitWindow_function_invocation is called when exiting the window_function_invocation production. - ExitWindow_function_invocation(c *Window_function_invocationContext) - - // ExitCommon_table_stmt is called when exiting the common_table_stmt production. - ExitCommon_table_stmt(c *Common_table_stmtContext) - - // ExitOrder_by_stmt is called when exiting the order_by_stmt production. - ExitOrder_by_stmt(c *Order_by_stmtContext) - - // ExitLimit_stmt is called when exiting the limit_stmt production. - ExitLimit_stmt(c *Limit_stmtContext) - - // ExitOrdering_term is called when exiting the ordering_term production. - ExitOrdering_term(c *Ordering_termContext) - - // ExitAsc_desc is called when exiting the asc_desc production. - ExitAsc_desc(c *Asc_descContext) - - // ExitFrame_left is called when exiting the frame_left production. 
- ExitFrame_left(c *Frame_leftContext) - - // ExitFrame_right is called when exiting the frame_right production. - ExitFrame_right(c *Frame_rightContext) - - // ExitFrame_single is called when exiting the frame_single production. - ExitFrame_single(c *Frame_singleContext) - - // ExitWindow_function is called when exiting the window_function production. - ExitWindow_function(c *Window_functionContext) - - // ExitOffset is called when exiting the offset production. - ExitOffset(c *OffsetContext) - - // ExitDefault_value is called when exiting the default_value production. - ExitDefault_value(c *Default_valueContext) - - // ExitPartition_by is called when exiting the partition_by production. - ExitPartition_by(c *Partition_byContext) - - // ExitOrder_by_expr is called when exiting the order_by_expr production. - ExitOrder_by_expr(c *Order_by_exprContext) - - // ExitOrder_by_expr_asc_desc is called when exiting the order_by_expr_asc_desc production. - ExitOrder_by_expr_asc_desc(c *Order_by_expr_asc_descContext) - - // ExitExpr_asc_desc is called when exiting the expr_asc_desc production. - ExitExpr_asc_desc(c *Expr_asc_descContext) - - // ExitInitial_select is called when exiting the initial_select production. - ExitInitial_select(c *Initial_selectContext) - - // ExitRecursive_select is called when exiting the recursive_select production. - ExitRecursive_select(c *Recursive_selectContext) - - // ExitUnary_operator is called when exiting the unary_operator production. - ExitUnary_operator(c *Unary_operatorContext) - - // ExitError_message is called when exiting the error_message production. - ExitError_message(c *Error_messageContext) - - // ExitModule_argument is called when exiting the module_argument production. - ExitModule_argument(c *Module_argumentContext) - - // ExitColumn_alias is called when exiting the column_alias production. - ExitColumn_alias(c *Column_aliasContext) - - // ExitKeyword is called when exiting the keyword production. - ExitKeyword(c *KeywordContext) - - // ExitName is called when exiting the name production. - ExitName(c *NameContext) - - // ExitFunction_name is called when exiting the function_name production. - ExitFunction_name(c *Function_nameContext) - - // ExitSchema_name is called when exiting the schema_name production. - ExitSchema_name(c *Schema_nameContext) - - // ExitTable_name is called when exiting the table_name production. - ExitTable_name(c *Table_nameContext) - - // ExitTable_or_index_name is called when exiting the table_or_index_name production. - ExitTable_or_index_name(c *Table_or_index_nameContext) - - // ExitColumn_name is called when exiting the column_name production. - ExitColumn_name(c *Column_nameContext) - - // ExitCollation_name is called when exiting the collation_name production. - ExitCollation_name(c *Collation_nameContext) - - // ExitForeign_table is called when exiting the foreign_table production. - ExitForeign_table(c *Foreign_tableContext) - - // ExitIndex_name is called when exiting the index_name production. - ExitIndex_name(c *Index_nameContext) - - // ExitTrigger_name is called when exiting the trigger_name production. - ExitTrigger_name(c *Trigger_nameContext) - - // ExitView_name is called when exiting the view_name production. - ExitView_name(c *View_nameContext) - - // ExitModule_name is called when exiting the module_name production. - ExitModule_name(c *Module_nameContext) - - // ExitPragma_name is called when exiting the pragma_name production. 
- ExitPragma_name(c *Pragma_nameContext) - - // ExitSavepoint_name is called when exiting the savepoint_name production. - ExitSavepoint_name(c *Savepoint_nameContext) - - // ExitTable_alias is called when exiting the table_alias production. - ExitTable_alias(c *Table_aliasContext) - - // ExitTransaction_name is called when exiting the transaction_name production. - ExitTransaction_name(c *Transaction_nameContext) - - // ExitWindow_name is called when exiting the window_name production. - ExitWindow_name(c *Window_nameContext) - - // ExitAlias is called when exiting the alias production. - ExitAlias(c *AliasContext) - - // ExitFilename is called when exiting the filename production. - ExitFilename(c *FilenameContext) - - // ExitBase_window_name is called when exiting the base_window_name production. - ExitBase_window_name(c *Base_window_nameContext) - - // ExitSimple_func is called when exiting the simple_func production. - ExitSimple_func(c *Simple_funcContext) - - // ExitAggregate_func is called when exiting the aggregate_func production. - ExitAggregate_func(c *Aggregate_funcContext) - - // ExitTable_function_name is called when exiting the table_function_name production. - ExitTable_function_name(c *Table_function_nameContext) - - // ExitAny_name is called when exiting the any_name production. - ExitAny_name(c *Any_nameContext) -} +// Code generated from SQLiteParser.g4 by ANTLR 4.12.0. DO NOT EDIT. + +package sqliteparser // SQLiteParser +import "github.com/antlr/antlr4/runtime/Go/antlr/v4" + +// SQLiteParserListener is a complete listener for a parse tree produced by SQLiteParser. +type SQLiteParserListener interface { + antlr.ParseTreeListener + + // EnterParse is called when entering the parse production. + EnterParse(c *ParseContext) + + // EnterSql_stmt_list is called when entering the sql_stmt_list production. + EnterSql_stmt_list(c *Sql_stmt_listContext) + + // EnterSql_stmt is called when entering the sql_stmt production. + EnterSql_stmt(c *Sql_stmtContext) + + // EnterAlter_table_stmt is called when entering the alter_table_stmt production. + EnterAlter_table_stmt(c *Alter_table_stmtContext) + + // EnterAnalyze_stmt is called when entering the analyze_stmt production. + EnterAnalyze_stmt(c *Analyze_stmtContext) + + // EnterAttach_stmt is called when entering the attach_stmt production. + EnterAttach_stmt(c *Attach_stmtContext) + + // EnterBegin_stmt is called when entering the begin_stmt production. + EnterBegin_stmt(c *Begin_stmtContext) + + // EnterCommit_stmt is called when entering the commit_stmt production. + EnterCommit_stmt(c *Commit_stmtContext) + + // EnterRollback_stmt is called when entering the rollback_stmt production. + EnterRollback_stmt(c *Rollback_stmtContext) + + // EnterSavepoint_stmt is called when entering the savepoint_stmt production. + EnterSavepoint_stmt(c *Savepoint_stmtContext) + + // EnterRelease_stmt is called when entering the release_stmt production. + EnterRelease_stmt(c *Release_stmtContext) + + // EnterCreate_index_stmt is called when entering the create_index_stmt production. + EnterCreate_index_stmt(c *Create_index_stmtContext) + + // EnterIndexed_column is called when entering the indexed_column production. + EnterIndexed_column(c *Indexed_columnContext) + + // EnterCreate_table_stmt is called when entering the create_table_stmt production. + EnterCreate_table_stmt(c *Create_table_stmtContext) + + // EnterColumn_def is called when entering the column_def production. 
+ EnterColumn_def(c *Column_defContext) + + // EnterType_name is called when entering the type_name production. + EnterType_name(c *Type_nameContext) + + // EnterColumn_constraint is called when entering the column_constraint production. + EnterColumn_constraint(c *Column_constraintContext) + + // EnterSigned_number is called when entering the signed_number production. + EnterSigned_number(c *Signed_numberContext) + + // EnterTable_constraint is called when entering the table_constraint production. + EnterTable_constraint(c *Table_constraintContext) + + // EnterForeign_key_clause is called when entering the foreign_key_clause production. + EnterForeign_key_clause(c *Foreign_key_clauseContext) + + // EnterConflict_clause is called when entering the conflict_clause production. + EnterConflict_clause(c *Conflict_clauseContext) + + // EnterCreate_trigger_stmt is called when entering the create_trigger_stmt production. + EnterCreate_trigger_stmt(c *Create_trigger_stmtContext) + + // EnterCreate_view_stmt is called when entering the create_view_stmt production. + EnterCreate_view_stmt(c *Create_view_stmtContext) + + // EnterCreate_virtual_table_stmt is called when entering the create_virtual_table_stmt production. + EnterCreate_virtual_table_stmt(c *Create_virtual_table_stmtContext) + + // EnterWith_clause is called when entering the with_clause production. + EnterWith_clause(c *With_clauseContext) + + // EnterCte_table_name is called when entering the cte_table_name production. + EnterCte_table_name(c *Cte_table_nameContext) + + // EnterRecursive_cte is called when entering the recursive_cte production. + EnterRecursive_cte(c *Recursive_cteContext) + + // EnterCommon_table_expression is called when entering the common_table_expression production. + EnterCommon_table_expression(c *Common_table_expressionContext) + + // EnterDelete_stmt is called when entering the delete_stmt production. + EnterDelete_stmt(c *Delete_stmtContext) + + // EnterDelete_stmt_limited is called when entering the delete_stmt_limited production. + EnterDelete_stmt_limited(c *Delete_stmt_limitedContext) + + // EnterDetach_stmt is called when entering the detach_stmt production. + EnterDetach_stmt(c *Detach_stmtContext) + + // EnterDrop_stmt is called when entering the drop_stmt production. + EnterDrop_stmt(c *Drop_stmtContext) + + // EnterExpr is called when entering the expr production. + EnterExpr(c *ExprContext) + + // EnterRaise_function is called when entering the raise_function production. + EnterRaise_function(c *Raise_functionContext) + + // EnterLiteral_value is called when entering the literal_value production. + EnterLiteral_value(c *Literal_valueContext) + + // EnterValue_row is called when entering the value_row production. + EnterValue_row(c *Value_rowContext) + + // EnterValues_clause is called when entering the values_clause production. + EnterValues_clause(c *Values_clauseContext) + + // EnterInsert_stmt is called when entering the insert_stmt production. + EnterInsert_stmt(c *Insert_stmtContext) + + // EnterReturning_clause is called when entering the returning_clause production. + EnterReturning_clause(c *Returning_clauseContext) + + // EnterUpsert_clause is called when entering the upsert_clause production. + EnterUpsert_clause(c *Upsert_clauseContext) + + // EnterPragma_stmt is called when entering the pragma_stmt production. + EnterPragma_stmt(c *Pragma_stmtContext) + + // EnterPragma_value is called when entering the pragma_value production. 
+ EnterPragma_value(c *Pragma_valueContext) + + // EnterReindex_stmt is called when entering the reindex_stmt production. + EnterReindex_stmt(c *Reindex_stmtContext) + + // EnterSelect_stmt is called when entering the select_stmt production. + EnterSelect_stmt(c *Select_stmtContext) + + // EnterJoin_clause is called when entering the join_clause production. + EnterJoin_clause(c *Join_clauseContext) + + // EnterSelect_core is called when entering the select_core production. + EnterSelect_core(c *Select_coreContext) + + // EnterFactored_select_stmt is called when entering the factored_select_stmt production. + EnterFactored_select_stmt(c *Factored_select_stmtContext) + + // EnterSimple_select_stmt is called when entering the simple_select_stmt production. + EnterSimple_select_stmt(c *Simple_select_stmtContext) + + // EnterCompound_select_stmt is called when entering the compound_select_stmt production. + EnterCompound_select_stmt(c *Compound_select_stmtContext) + + // EnterTable_or_subquery is called when entering the table_or_subquery production. + EnterTable_or_subquery(c *Table_or_subqueryContext) + + // EnterResult_column is called when entering the result_column production. + EnterResult_column(c *Result_columnContext) + + // EnterJoin_operator is called when entering the join_operator production. + EnterJoin_operator(c *Join_operatorContext) + + // EnterJoin_constraint is called when entering the join_constraint production. + EnterJoin_constraint(c *Join_constraintContext) + + // EnterCompound_operator is called when entering the compound_operator production. + EnterCompound_operator(c *Compound_operatorContext) + + // EnterUpdate_stmt is called when entering the update_stmt production. + EnterUpdate_stmt(c *Update_stmtContext) + + // EnterColumn_name_list is called when entering the column_name_list production. + EnterColumn_name_list(c *Column_name_listContext) + + // EnterUpdate_stmt_limited is called when entering the update_stmt_limited production. + EnterUpdate_stmt_limited(c *Update_stmt_limitedContext) + + // EnterQualified_table_name is called when entering the qualified_table_name production. + EnterQualified_table_name(c *Qualified_table_nameContext) + + // EnterVacuum_stmt is called when entering the vacuum_stmt production. + EnterVacuum_stmt(c *Vacuum_stmtContext) + + // EnterFilter_clause is called when entering the filter_clause production. + EnterFilter_clause(c *Filter_clauseContext) + + // EnterWindow_defn is called when entering the window_defn production. + EnterWindow_defn(c *Window_defnContext) + + // EnterOver_clause is called when entering the over_clause production. + EnterOver_clause(c *Over_clauseContext) + + // EnterFrame_spec is called when entering the frame_spec production. + EnterFrame_spec(c *Frame_specContext) + + // EnterFrame_clause is called when entering the frame_clause production. + EnterFrame_clause(c *Frame_clauseContext) + + // EnterSimple_function_invocation is called when entering the simple_function_invocation production. + EnterSimple_function_invocation(c *Simple_function_invocationContext) + + // EnterAggregate_function_invocation is called when entering the aggregate_function_invocation production. + EnterAggregate_function_invocation(c *Aggregate_function_invocationContext) + + // EnterWindow_function_invocation is called when entering the window_function_invocation production. + EnterWindow_function_invocation(c *Window_function_invocationContext) + + // EnterCommon_table_stmt is called when entering the common_table_stmt production. 
+ EnterCommon_table_stmt(c *Common_table_stmtContext) + + // EnterOrder_by_stmt is called when entering the order_by_stmt production. + EnterOrder_by_stmt(c *Order_by_stmtContext) + + // EnterLimit_stmt is called when entering the limit_stmt production. + EnterLimit_stmt(c *Limit_stmtContext) + + // EnterOrdering_term is called when entering the ordering_term production. + EnterOrdering_term(c *Ordering_termContext) + + // EnterAsc_desc is called when entering the asc_desc production. + EnterAsc_desc(c *Asc_descContext) + + // EnterFrame_left is called when entering the frame_left production. + EnterFrame_left(c *Frame_leftContext) + + // EnterFrame_right is called when entering the frame_right production. + EnterFrame_right(c *Frame_rightContext) + + // EnterFrame_single is called when entering the frame_single production. + EnterFrame_single(c *Frame_singleContext) + + // EnterWindow_function is called when entering the window_function production. + EnterWindow_function(c *Window_functionContext) + + // EnterOffset is called when entering the offset production. + EnterOffset(c *OffsetContext) + + // EnterDefault_value is called when entering the default_value production. + EnterDefault_value(c *Default_valueContext) + + // EnterPartition_by is called when entering the partition_by production. + EnterPartition_by(c *Partition_byContext) + + // EnterOrder_by_expr is called when entering the order_by_expr production. + EnterOrder_by_expr(c *Order_by_exprContext) + + // EnterOrder_by_expr_asc_desc is called when entering the order_by_expr_asc_desc production. + EnterOrder_by_expr_asc_desc(c *Order_by_expr_asc_descContext) + + // EnterExpr_asc_desc is called when entering the expr_asc_desc production. + EnterExpr_asc_desc(c *Expr_asc_descContext) + + // EnterInitial_select is called when entering the initial_select production. + EnterInitial_select(c *Initial_selectContext) + + // EnterRecursive_select is called when entering the recursive_select production. + EnterRecursive_select(c *Recursive_selectContext) + + // EnterUnary_operator is called when entering the unary_operator production. + EnterUnary_operator(c *Unary_operatorContext) + + // EnterError_message is called when entering the error_message production. + EnterError_message(c *Error_messageContext) + + // EnterModule_argument is called when entering the module_argument production. + EnterModule_argument(c *Module_argumentContext) + + // EnterColumn_alias is called when entering the column_alias production. + EnterColumn_alias(c *Column_aliasContext) + + // EnterKeyword is called when entering the keyword production. + EnterKeyword(c *KeywordContext) + + // EnterName is called when entering the name production. + EnterName(c *NameContext) + + // EnterFunction_name is called when entering the function_name production. + EnterFunction_name(c *Function_nameContext) + + // EnterSchema_name is called when entering the schema_name production. + EnterSchema_name(c *Schema_nameContext) + + // EnterTable_name is called when entering the table_name production. + EnterTable_name(c *Table_nameContext) + + // EnterTable_or_index_name is called when entering the table_or_index_name production. + EnterTable_or_index_name(c *Table_or_index_nameContext) + + // EnterColumn_name is called when entering the column_name production. + EnterColumn_name(c *Column_nameContext) + + // EnterCollation_name is called when entering the collation_name production. 
+ EnterCollation_name(c *Collation_nameContext) + + // EnterForeign_table is called when entering the foreign_table production. + EnterForeign_table(c *Foreign_tableContext) + + // EnterIndex_name is called when entering the index_name production. + EnterIndex_name(c *Index_nameContext) + + // EnterTrigger_name is called when entering the trigger_name production. + EnterTrigger_name(c *Trigger_nameContext) + + // EnterView_name is called when entering the view_name production. + EnterView_name(c *View_nameContext) + + // EnterModule_name is called when entering the module_name production. + EnterModule_name(c *Module_nameContext) + + // EnterPragma_name is called when entering the pragma_name production. + EnterPragma_name(c *Pragma_nameContext) + + // EnterSavepoint_name is called when entering the savepoint_name production. + EnterSavepoint_name(c *Savepoint_nameContext) + + // EnterTable_alias is called when entering the table_alias production. + EnterTable_alias(c *Table_aliasContext) + + // EnterTransaction_name is called when entering the transaction_name production. + EnterTransaction_name(c *Transaction_nameContext) + + // EnterWindow_name is called when entering the window_name production. + EnterWindow_name(c *Window_nameContext) + + // EnterAlias is called when entering the alias production. + EnterAlias(c *AliasContext) + + // EnterFilename is called when entering the filename production. + EnterFilename(c *FilenameContext) + + // EnterBase_window_name is called when entering the base_window_name production. + EnterBase_window_name(c *Base_window_nameContext) + + // EnterSimple_func is called when entering the simple_func production. + EnterSimple_func(c *Simple_funcContext) + + // EnterAggregate_func is called when entering the aggregate_func production. + EnterAggregate_func(c *Aggregate_funcContext) + + // EnterTable_function_name is called when entering the table_function_name production. + EnterTable_function_name(c *Table_function_nameContext) + + // EnterAny_name is called when entering the any_name production. + EnterAny_name(c *Any_nameContext) + + // ExitParse is called when exiting the parse production. + ExitParse(c *ParseContext) + + // ExitSql_stmt_list is called when exiting the sql_stmt_list production. + ExitSql_stmt_list(c *Sql_stmt_listContext) + + // ExitSql_stmt is called when exiting the sql_stmt production. + ExitSql_stmt(c *Sql_stmtContext) + + // ExitAlter_table_stmt is called when exiting the alter_table_stmt production. + ExitAlter_table_stmt(c *Alter_table_stmtContext) + + // ExitAnalyze_stmt is called when exiting the analyze_stmt production. + ExitAnalyze_stmt(c *Analyze_stmtContext) + + // ExitAttach_stmt is called when exiting the attach_stmt production. + ExitAttach_stmt(c *Attach_stmtContext) + + // ExitBegin_stmt is called when exiting the begin_stmt production. + ExitBegin_stmt(c *Begin_stmtContext) + + // ExitCommit_stmt is called when exiting the commit_stmt production. + ExitCommit_stmt(c *Commit_stmtContext) + + // ExitRollback_stmt is called when exiting the rollback_stmt production. + ExitRollback_stmt(c *Rollback_stmtContext) + + // ExitSavepoint_stmt is called when exiting the savepoint_stmt production. + ExitSavepoint_stmt(c *Savepoint_stmtContext) + + // ExitRelease_stmt is called when exiting the release_stmt production. + ExitRelease_stmt(c *Release_stmtContext) + + // ExitCreate_index_stmt is called when exiting the create_index_stmt production. 
+ ExitCreate_index_stmt(c *Create_index_stmtContext) + + // ExitIndexed_column is called when exiting the indexed_column production. + ExitIndexed_column(c *Indexed_columnContext) + + // ExitCreate_table_stmt is called when exiting the create_table_stmt production. + ExitCreate_table_stmt(c *Create_table_stmtContext) + + // ExitColumn_def is called when exiting the column_def production. + ExitColumn_def(c *Column_defContext) + + // ExitType_name is called when exiting the type_name production. + ExitType_name(c *Type_nameContext) + + // ExitColumn_constraint is called when exiting the column_constraint production. + ExitColumn_constraint(c *Column_constraintContext) + + // ExitSigned_number is called when exiting the signed_number production. + ExitSigned_number(c *Signed_numberContext) + + // ExitTable_constraint is called when exiting the table_constraint production. + ExitTable_constraint(c *Table_constraintContext) + + // ExitForeign_key_clause is called when exiting the foreign_key_clause production. + ExitForeign_key_clause(c *Foreign_key_clauseContext) + + // ExitConflict_clause is called when exiting the conflict_clause production. + ExitConflict_clause(c *Conflict_clauseContext) + + // ExitCreate_trigger_stmt is called when exiting the create_trigger_stmt production. + ExitCreate_trigger_stmt(c *Create_trigger_stmtContext) + + // ExitCreate_view_stmt is called when exiting the create_view_stmt production. + ExitCreate_view_stmt(c *Create_view_stmtContext) + + // ExitCreate_virtual_table_stmt is called when exiting the create_virtual_table_stmt production. + ExitCreate_virtual_table_stmt(c *Create_virtual_table_stmtContext) + + // ExitWith_clause is called when exiting the with_clause production. + ExitWith_clause(c *With_clauseContext) + + // ExitCte_table_name is called when exiting the cte_table_name production. + ExitCte_table_name(c *Cte_table_nameContext) + + // ExitRecursive_cte is called when exiting the recursive_cte production. + ExitRecursive_cte(c *Recursive_cteContext) + + // ExitCommon_table_expression is called when exiting the common_table_expression production. + ExitCommon_table_expression(c *Common_table_expressionContext) + + // ExitDelete_stmt is called when exiting the delete_stmt production. + ExitDelete_stmt(c *Delete_stmtContext) + + // ExitDelete_stmt_limited is called when exiting the delete_stmt_limited production. + ExitDelete_stmt_limited(c *Delete_stmt_limitedContext) + + // ExitDetach_stmt is called when exiting the detach_stmt production. + ExitDetach_stmt(c *Detach_stmtContext) + + // ExitDrop_stmt is called when exiting the drop_stmt production. + ExitDrop_stmt(c *Drop_stmtContext) + + // ExitExpr is called when exiting the expr production. + ExitExpr(c *ExprContext) + + // ExitRaise_function is called when exiting the raise_function production. + ExitRaise_function(c *Raise_functionContext) + + // ExitLiteral_value is called when exiting the literal_value production. + ExitLiteral_value(c *Literal_valueContext) + + // ExitValue_row is called when exiting the value_row production. + ExitValue_row(c *Value_rowContext) + + // ExitValues_clause is called when exiting the values_clause production. + ExitValues_clause(c *Values_clauseContext) + + // ExitInsert_stmt is called when exiting the insert_stmt production. + ExitInsert_stmt(c *Insert_stmtContext) + + // ExitReturning_clause is called when exiting the returning_clause production. 
+ ExitReturning_clause(c *Returning_clauseContext) + + // ExitUpsert_clause is called when exiting the upsert_clause production. + ExitUpsert_clause(c *Upsert_clauseContext) + + // ExitPragma_stmt is called when exiting the pragma_stmt production. + ExitPragma_stmt(c *Pragma_stmtContext) + + // ExitPragma_value is called when exiting the pragma_value production. + ExitPragma_value(c *Pragma_valueContext) + + // ExitReindex_stmt is called when exiting the reindex_stmt production. + ExitReindex_stmt(c *Reindex_stmtContext) + + // ExitSelect_stmt is called when exiting the select_stmt production. + ExitSelect_stmt(c *Select_stmtContext) + + // ExitJoin_clause is called when exiting the join_clause production. + ExitJoin_clause(c *Join_clauseContext) + + // ExitSelect_core is called when exiting the select_core production. + ExitSelect_core(c *Select_coreContext) + + // ExitFactored_select_stmt is called when exiting the factored_select_stmt production. + ExitFactored_select_stmt(c *Factored_select_stmtContext) + + // ExitSimple_select_stmt is called when exiting the simple_select_stmt production. + ExitSimple_select_stmt(c *Simple_select_stmtContext) + + // ExitCompound_select_stmt is called when exiting the compound_select_stmt production. + ExitCompound_select_stmt(c *Compound_select_stmtContext) + + // ExitTable_or_subquery is called when exiting the table_or_subquery production. + ExitTable_or_subquery(c *Table_or_subqueryContext) + + // ExitResult_column is called when exiting the result_column production. + ExitResult_column(c *Result_columnContext) + + // ExitJoin_operator is called when exiting the join_operator production. + ExitJoin_operator(c *Join_operatorContext) + + // ExitJoin_constraint is called when exiting the join_constraint production. + ExitJoin_constraint(c *Join_constraintContext) + + // ExitCompound_operator is called when exiting the compound_operator production. + ExitCompound_operator(c *Compound_operatorContext) + + // ExitUpdate_stmt is called when exiting the update_stmt production. + ExitUpdate_stmt(c *Update_stmtContext) + + // ExitColumn_name_list is called when exiting the column_name_list production. + ExitColumn_name_list(c *Column_name_listContext) + + // ExitUpdate_stmt_limited is called when exiting the update_stmt_limited production. + ExitUpdate_stmt_limited(c *Update_stmt_limitedContext) + + // ExitQualified_table_name is called when exiting the qualified_table_name production. + ExitQualified_table_name(c *Qualified_table_nameContext) + + // ExitVacuum_stmt is called when exiting the vacuum_stmt production. + ExitVacuum_stmt(c *Vacuum_stmtContext) + + // ExitFilter_clause is called when exiting the filter_clause production. + ExitFilter_clause(c *Filter_clauseContext) + + // ExitWindow_defn is called when exiting the window_defn production. + ExitWindow_defn(c *Window_defnContext) + + // ExitOver_clause is called when exiting the over_clause production. + ExitOver_clause(c *Over_clauseContext) + + // ExitFrame_spec is called when exiting the frame_spec production. + ExitFrame_spec(c *Frame_specContext) + + // ExitFrame_clause is called when exiting the frame_clause production. + ExitFrame_clause(c *Frame_clauseContext) + + // ExitSimple_function_invocation is called when exiting the simple_function_invocation production. + ExitSimple_function_invocation(c *Simple_function_invocationContext) + + // ExitAggregate_function_invocation is called when exiting the aggregate_function_invocation production. 
+ ExitAggregate_function_invocation(c *Aggregate_function_invocationContext) + + // ExitWindow_function_invocation is called when exiting the window_function_invocation production. + ExitWindow_function_invocation(c *Window_function_invocationContext) + + // ExitCommon_table_stmt is called when exiting the common_table_stmt production. + ExitCommon_table_stmt(c *Common_table_stmtContext) + + // ExitOrder_by_stmt is called when exiting the order_by_stmt production. + ExitOrder_by_stmt(c *Order_by_stmtContext) + + // ExitLimit_stmt is called when exiting the limit_stmt production. + ExitLimit_stmt(c *Limit_stmtContext) + + // ExitOrdering_term is called when exiting the ordering_term production. + ExitOrdering_term(c *Ordering_termContext) + + // ExitAsc_desc is called when exiting the asc_desc production. + ExitAsc_desc(c *Asc_descContext) + + // ExitFrame_left is called when exiting the frame_left production. + ExitFrame_left(c *Frame_leftContext) + + // ExitFrame_right is called when exiting the frame_right production. + ExitFrame_right(c *Frame_rightContext) + + // ExitFrame_single is called when exiting the frame_single production. + ExitFrame_single(c *Frame_singleContext) + + // ExitWindow_function is called when exiting the window_function production. + ExitWindow_function(c *Window_functionContext) + + // ExitOffset is called when exiting the offset production. + ExitOffset(c *OffsetContext) + + // ExitDefault_value is called when exiting the default_value production. + ExitDefault_value(c *Default_valueContext) + + // ExitPartition_by is called when exiting the partition_by production. + ExitPartition_by(c *Partition_byContext) + + // ExitOrder_by_expr is called when exiting the order_by_expr production. + ExitOrder_by_expr(c *Order_by_exprContext) + + // ExitOrder_by_expr_asc_desc is called when exiting the order_by_expr_asc_desc production. + ExitOrder_by_expr_asc_desc(c *Order_by_expr_asc_descContext) + + // ExitExpr_asc_desc is called when exiting the expr_asc_desc production. + ExitExpr_asc_desc(c *Expr_asc_descContext) + + // ExitInitial_select is called when exiting the initial_select production. + ExitInitial_select(c *Initial_selectContext) + + // ExitRecursive_select is called when exiting the recursive_select production. + ExitRecursive_select(c *Recursive_selectContext) + + // ExitUnary_operator is called when exiting the unary_operator production. + ExitUnary_operator(c *Unary_operatorContext) + + // ExitError_message is called when exiting the error_message production. + ExitError_message(c *Error_messageContext) + + // ExitModule_argument is called when exiting the module_argument production. + ExitModule_argument(c *Module_argumentContext) + + // ExitColumn_alias is called when exiting the column_alias production. + ExitColumn_alias(c *Column_aliasContext) + + // ExitKeyword is called when exiting the keyword production. + ExitKeyword(c *KeywordContext) + + // ExitName is called when exiting the name production. + ExitName(c *NameContext) + + // ExitFunction_name is called when exiting the function_name production. + ExitFunction_name(c *Function_nameContext) + + // ExitSchema_name is called when exiting the schema_name production. + ExitSchema_name(c *Schema_nameContext) + + // ExitTable_name is called when exiting the table_name production. + ExitTable_name(c *Table_nameContext) + + // ExitTable_or_index_name is called when exiting the table_or_index_name production. 
+ ExitTable_or_index_name(c *Table_or_index_nameContext) + + // ExitColumn_name is called when exiting the column_name production. + ExitColumn_name(c *Column_nameContext) + + // ExitCollation_name is called when exiting the collation_name production. + ExitCollation_name(c *Collation_nameContext) + + // ExitForeign_table is called when exiting the foreign_table production. + ExitForeign_table(c *Foreign_tableContext) + + // ExitIndex_name is called when exiting the index_name production. + ExitIndex_name(c *Index_nameContext) + + // ExitTrigger_name is called when exiting the trigger_name production. + ExitTrigger_name(c *Trigger_nameContext) + + // ExitView_name is called when exiting the view_name production. + ExitView_name(c *View_nameContext) + + // ExitModule_name is called when exiting the module_name production. + ExitModule_name(c *Module_nameContext) + + // ExitPragma_name is called when exiting the pragma_name production. + ExitPragma_name(c *Pragma_nameContext) + + // ExitSavepoint_name is called when exiting the savepoint_name production. + ExitSavepoint_name(c *Savepoint_nameContext) + + // ExitTable_alias is called when exiting the table_alias production. + ExitTable_alias(c *Table_aliasContext) + + // ExitTransaction_name is called when exiting the transaction_name production. + ExitTransaction_name(c *Transaction_nameContext) + + // ExitWindow_name is called when exiting the window_name production. + ExitWindow_name(c *Window_nameContext) + + // ExitAlias is called when exiting the alias production. + ExitAlias(c *AliasContext) + + // ExitFilename is called when exiting the filename production. + ExitFilename(c *FilenameContext) + + // ExitBase_window_name is called when exiting the base_window_name production. + ExitBase_window_name(c *Base_window_nameContext) + + // ExitSimple_func is called when exiting the simple_func production. + ExitSimple_func(c *Simple_funcContext) + + // ExitAggregate_func is called when exiting the aggregate_func production. + ExitAggregate_func(c *Aggregate_funcContext) + + // ExitTable_function_name is called when exiting the table_function_name production. + ExitTable_function_name(c *Table_function_nameContext) + + // ExitAny_name is called when exiting the any_name production. 
+ ExitAny_name(c *Any_nameContext) +} diff --git a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparserutils/utils.go b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparserutils/utils.go index 148d159ad2..e9a53765bd 100644 --- a/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparserutils/utils.go +++ b/vendor/github.com/libsql/sqlite-antlr4-parser/sqliteparserutils/utils.go @@ -1,117 +1,117 @@ -package sqliteparserutils - -import ( - "github.com/antlr/antlr4/runtime/Go/antlr/v4" - "github.com/libsql/sqlite-antlr4-parser/sqliteparser" -) - -// TODO: Shell test begin transaction on shell - -type SplitStatementExtraInfo struct { - IncompleteCreateTriggerStatement bool - IncompleteMultilineComment bool - LastTokenType int -} - -func SplitStatement(statement string) (stmts []string, extraInfo SplitStatementExtraInfo) { - tokenStream := createTokenStream(statement) - - stmtIntervals := make([]*antlr.Interval, 0) - currentIntervalStart := -1 - insideCreateTriggerStmt := false - insideMultilineComment := false - - var previousToken antlr.Token - var currentToken antlr.Token - for currentToken = tokenStream.LT(1); currentToken.GetTokenType() != antlr.TokenEOF; currentToken = tokenStream.LT(1) { - // We break loop here because we're sure multiline comment didn't finished, otherwise lexer would have just ignored - // it - if atIncompleteMultilineCommentStart(tokenStream) { - insideMultilineComment = true - break - } - - if currentIntervalStart == -1 { - if currentToken.GetTokenType() == sqliteparser.SQLiteLexerSCOL { - previousToken = currentToken - tokenStream.Consume() - continue - } - currentIntervalStart = currentToken.GetTokenIndex() - - if atCreateTriggerStart(tokenStream) { - insideCreateTriggerStmt = true - previousToken = currentToken - tokenStream.Consume() - continue - } - - } - - if insideCreateTriggerStmt { - if currentToken.GetTokenType() == sqliteparser.SQLiteLexerEND_ { - insideCreateTriggerStmt = false - } - } else if currentToken.GetTokenType() == sqliteparser.SQLiteLexerSCOL { - stmtIntervals = append(stmtIntervals, antlr.NewInterval(currentIntervalStart, previousToken.GetTokenIndex())) - currentIntervalStart = -1 - } - - previousToken = currentToken - tokenStream.Consume() - } - - if currentIntervalStart != -1 && previousToken != nil { - stmtIntervals = append(stmtIntervals, antlr.NewInterval(currentIntervalStart, previousToken.GetTokenIndex())) - } - - stmts = make([]string, 0) - for _, stmtInterval := range stmtIntervals { - stmts = append(stmts, tokenStream.GetTextFromInterval(stmtInterval)) - } - - lastTokenType := antlr.TokenInvalidType - if previousToken != nil { - lastTokenType = previousToken.GetTokenType() - } - return stmts, SplitStatementExtraInfo{IncompleteCreateTriggerStatement: insideCreateTriggerStmt, IncompleteMultilineComment: insideMultilineComment, LastTokenType: lastTokenType} -} - -func atCreateTriggerStart(tokenStream antlr.TokenStream) bool { - if tokenStream.LT(1).GetTokenType() != sqliteparser.SQLiteLexerCREATE_ { - return false - } - - if tokenStream.LT(2).GetTokenType() == sqliteparser.SQLiteLexerTRIGGER_ { - return true - } - - if tokenStream.LT(2).GetTokenType() == sqliteparser.SQLiteLexerTEMP_ || tokenStream.LT(2).GetTokenType() == sqliteparser.SQLiteLexerTEMPORARY_ && - tokenStream.LT(3).GetTokenType() == sqliteparser.SQLiteLexerTRIGGER_ { - return true - } - - return false - -} - -// Note: Only starts for incomplete multiline comments will be detected cause lexer automatically ignores complete -// multiline comments -func 
atIncompleteMultilineCommentStart(tokenStream antlr.TokenStream) bool { - if tokenStream.LT(1).GetTokenType() != sqliteparser.SQLiteLexerDIV { - return false - } - - if tokenStream.LT(2).GetTokenType() == sqliteparser.SQLiteLexerSTAR { - return true - } - - return false -} - -func createTokenStream(statement string) *antlr.CommonTokenStream { - statementStream := antlr.NewInputStream(statement) - - lexer := sqliteparser.NewSQLiteLexer(statementStream) - return antlr.NewCommonTokenStream(lexer, 0) -} +package sqliteparserutils + +import ( + "github.com/antlr/antlr4/runtime/Go/antlr/v4" + "github.com/libsql/sqlite-antlr4-parser/sqliteparser" +) + +// TODO: Shell test begin transaction on shell + +type SplitStatementExtraInfo struct { + IncompleteCreateTriggerStatement bool + IncompleteMultilineComment bool + LastTokenType int +} + +func SplitStatement(statement string) (stmts []string, extraInfo SplitStatementExtraInfo) { + tokenStream := createTokenStream(statement) + + stmtIntervals := make([]*antlr.Interval, 0) + currentIntervalStart := -1 + insideCreateTriggerStmt := false + insideMultilineComment := false + + var previousToken antlr.Token + var currentToken antlr.Token + for currentToken = tokenStream.LT(1); currentToken.GetTokenType() != antlr.TokenEOF; currentToken = tokenStream.LT(1) { + // We break the loop here because the multiline comment is certainly unterminated; otherwise the lexer + // would simply have ignored it + if atIncompleteMultilineCommentStart(tokenStream) { + insideMultilineComment = true + break + } + + if currentIntervalStart == -1 { + if currentToken.GetTokenType() == sqliteparser.SQLiteLexerSCOL { + previousToken = currentToken + tokenStream.Consume() + continue + } + currentIntervalStart = currentToken.GetTokenIndex() + + if atCreateTriggerStart(tokenStream) { + insideCreateTriggerStmt = true + previousToken = currentToken + tokenStream.Consume() + continue + } + + } + + if insideCreateTriggerStmt { + if currentToken.GetTokenType() == sqliteparser.SQLiteLexerEND_ { + insideCreateTriggerStmt = false + } + } else if currentToken.GetTokenType() == sqliteparser.SQLiteLexerSCOL { + stmtIntervals = append(stmtIntervals, antlr.NewInterval(currentIntervalStart, previousToken.GetTokenIndex())) + currentIntervalStart = -1 + } + + previousToken = currentToken + tokenStream.Consume() + } + + if currentIntervalStart != -1 && previousToken != nil { + stmtIntervals = append(stmtIntervals, antlr.NewInterval(currentIntervalStart, previousToken.GetTokenIndex())) + } + + stmts = make([]string, 0) + for _, stmtInterval := range stmtIntervals { + stmts = append(stmts, tokenStream.GetTextFromInterval(stmtInterval)) + } + + lastTokenType := antlr.TokenInvalidType + if previousToken != nil { + lastTokenType = previousToken.GetTokenType() + } + return stmts, SplitStatementExtraInfo{IncompleteCreateTriggerStatement: insideCreateTriggerStmt, IncompleteMultilineComment: insideMultilineComment, LastTokenType: lastTokenType} +} + +func atCreateTriggerStart(tokenStream antlr.TokenStream) bool { + if tokenStream.LT(1).GetTokenType() != sqliteparser.SQLiteLexerCREATE_ { + return false + } + + if tokenStream.LT(2).GetTokenType() == sqliteparser.SQLiteLexerTRIGGER_ { + return true + } + + if (tokenStream.LT(2).GetTokenType() == sqliteparser.SQLiteLexerTEMP_ || tokenStream.LT(2).GetTokenType() == sqliteparser.SQLiteLexerTEMPORARY_) && + tokenStream.LT(3).GetTokenType() == sqliteparser.SQLiteLexerTRIGGER_ { + return true + } + + return false + +} + +// Note: only the starts of incomplete multiline comments are
detected, because the lexer automatically ignores complete +// multiline comments +func atIncompleteMultilineCommentStart(tokenStream antlr.TokenStream) bool { + if tokenStream.LT(1).GetTokenType() != sqliteparser.SQLiteLexerDIV { + return false + } + + if tokenStream.LT(2).GetTokenType() == sqliteparser.SQLiteLexerSTAR { + return true + } + + return false +} + +func createTokenStream(statement string) *antlr.CommonTokenStream { + statementStream := antlr.NewInputStream(statement) + + lexer := sqliteparser.NewSQLiteLexer(statementStream) + return antlr.NewCommonTokenStream(lexer, 0) +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/LICENSE b/vendor/github.com/tursodatabase/libsql-client-go/LICENSE index 46782b434e..136c0fff7d 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/LICENSE +++ b/vendor/github.com/tursodatabase/libsql-client-go/LICENSE @@ -1,21 +1,21 @@ -MIT License - -Copyright (c) 2023 libSQL - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +MIT License + +Copyright (c) 2023 libSQL + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.
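The utils.go hunk above carries the vendored statement splitter. For orientation, a minimal sketch of how SplitStatement might be driven; the import path is taken from the hunk above, but the input SQL and the printed fields are illustrative only and not part of this change:

package main

import (
	"fmt"

	"github.com/libsql/sqlite-antlr4-parser/sqliteparserutils"
)

func main() {
	// Two complete statements followed by an unterminated block comment.
	sql := "CREATE TABLE t (id INTEGER); INSERT INTO t VALUES (1); /* dangling"
	stmts, extra := sqliteparserutils.SplitStatement(sql)
	for i, stmt := range stmts {
		fmt.Printf("statement %d: %s\n", i, stmt)
	}
	// extra reports trailing state the splitter could not resolve at EOF,
	// such as an open CREATE TRIGGER body or an unterminated block comment.
	fmt.Println("incomplete multiline comment:", extra.IncompleteMultilineComment)
	fmt.Println("incomplete CREATE TRIGGER:", extra.IncompleteCreateTriggerStatement)
}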
diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/batch.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/batch.go index cb2443b235..ca9f5946b6 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/batch.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/batch.go @@ -1,22 +1,22 @@ -package hrana - -type Batch struct { - Steps []BatchStep `json:"steps"` - ReplicationIndex *uint64 `json:"replication_index,omitempty"` -} - -type BatchStep struct { - Stmt Stmt `json:"stmt"` - Condition *BatchCondition `json:"condition,omitempty"` -} - -type BatchCondition struct { - Type string `json:"type"` - Step *int32 `json:"step,omitempty"` - Cond *BatchCondition `json:"cond,omitempty"` - Conds []BatchCondition `json:"conds,omitempty"` -} - -func (b *Batch) Add(stmt Stmt) { - b.Steps = append(b.Steps, BatchStep{Stmt: stmt}) -} +package hrana + +type Batch struct { + Steps []BatchStep `json:"steps"` + ReplicationIndex *uint64 `json:"replication_index,omitempty"` +} + +type BatchStep struct { + Stmt Stmt `json:"stmt"` + Condition *BatchCondition `json:"condition,omitempty"` +} + +type BatchCondition struct { + Type string `json:"type"` + Step *int32 `json:"step,omitempty"` + Cond *BatchCondition `json:"cond,omitempty"` + Conds []BatchCondition `json:"conds,omitempty"` +} + +func (b *Batch) Add(stmt Stmt) { + b.Steps = append(b.Steps, BatchStep{Stmt: stmt}) +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/batch_result.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/batch_result.go index c438d4c9c9..2dc64a5781 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/batch_result.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/batch_result.go @@ -1,49 +1,49 @@ -package hrana - -import ( - "encoding/json" - "fmt" - "strconv" -) - -type BatchResult struct { - StepResults []*StmtResult `json:"step_results"` - StepErrors []*Error `json:"step_errors"` - ReplicationIndex *uint64 `json:"replication_index"` -} - -func (b *BatchResult) UnmarshalJSON(data []byte) error { - type Alias BatchResult - aux := &struct { - ReplicationIndex interface{} `json:"replication_index,omitempty"` - *Alias - }{ - Alias: (*Alias)(b), - } - - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - - if aux.ReplicationIndex == nil { - return nil - } - - switch v := aux.ReplicationIndex.(type) { - case float64: - repIndex := uint64(v) - b.ReplicationIndex = &repIndex - case string: - if v == "" { - return nil - } - repIndex, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return err - } - b.ReplicationIndex = &repIndex - default: - return fmt.Errorf("invalid type for replication index: %T", v) - } - return nil -} +package hrana + +import ( + "encoding/json" + "fmt" + "strconv" +) + +type BatchResult struct { + StepResults []*StmtResult `json:"step_results"` + StepErrors []*Error `json:"step_errors"` + ReplicationIndex *uint64 `json:"replication_index"` +} + +func (b *BatchResult) UnmarshalJSON(data []byte) error { + type Alias BatchResult + aux := &struct { + ReplicationIndex interface{} `json:"replication_index,omitempty"` + *Alias + }{ + Alias: (*Alias)(b), + } + + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + if aux.ReplicationIndex == nil { + return nil + } + + switch v := aux.ReplicationIndex.(type) { + case float64: + repIndex := uint64(v) + 
b.ReplicationIndex = &repIndex + case string: + if v == "" { + return nil + } + repIndex, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return err + } + b.ReplicationIndex = &repIndex + default: + return fmt.Errorf("invalid type for replication index: %T", v) + } + return nil +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/pipeline_request.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/pipeline_request.go index 39fc709c68..49da3bba10 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/pipeline_request.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/pipeline_request.go @@ -1,10 +1,10 @@ -package hrana - -type PipelineRequest struct { - Baton string `json:"baton,omitempty"` - Requests []StreamRequest `json:"requests"` -} - -func (pr *PipelineRequest) Add(request StreamRequest) { - pr.Requests = append(pr.Requests, request) -} +package hrana + +type PipelineRequest struct { + Baton string `json:"baton,omitempty"` + Requests []StreamRequest `json:"requests"` +} + +func (pr *PipelineRequest) Add(request StreamRequest) { + pr.Requests = append(pr.Requests, request) +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/pipeline_response.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/pipeline_response.go index c97e1819eb..e85ee95717 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/pipeline_response.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/pipeline_response.go @@ -1,7 +1,7 @@ -package hrana - -type PipelineResponse struct { - Baton string `json:"baton,omitempty"` - BaseUrl string `json:"base_url,omitempty"` - Results []StreamResult `json:"results"` -} +package hrana + +type PipelineResponse struct { + Baton string `json:"baton,omitempty"` + BaseUrl string `json:"base_url,omitempty"` + Results []StreamResult `json:"results"` +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stmt.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stmt.go index 71704867a7..eeddc486cd 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stmt.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stmt.go @@ -1,58 +1,58 @@ -package hrana - -import ( - "github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared" -) - -type Stmt struct { - Sql *string `json:"sql,omitempty"` - SqlId *int32 `json:"sql_id,omitempty"` - Args []Value `json:"args,omitempty"` - NamedArgs []NamedArg `json:"named_args,omitempty"` - WantRows bool `json:"want_rows"` - ReplicationIndex *uint64 `json:"replication_index,omitempty"` -} - -type NamedArg struct { - Name string `json:"name"` - Value Value `json:"value"` -} - -func (s *Stmt) AddArgs(params shared.Params) error { - if len(params.Named()) > 0 { - return s.AddNamedArgs(params.Named()) - } else { - return s.AddPositionalArgs(params.Positional()) - } -} - -func (s *Stmt) AddPositionalArgs(args []any) error { - argValues := make([]Value, len(args)) - for idx := range args { - var err error - if argValues[idx], err = ToValue(args[idx]); err != nil { - return err - } - } - s.Args = argValues - return nil -} - -func (s *Stmt) AddNamedArgs(args map[string]any) error { - argValues := make([]NamedArg, len(args)) - idx := 0 - for key, value := range args { - var err error - var v Value - if v, err = 
ToValue(value); err != nil { - return err - } - argValues[idx] = NamedArg{ - Name: key, - Value: v, - } - idx++ - } - s.NamedArgs = argValues - return nil -} +package hrana + +import ( + "github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared" +) + +type Stmt struct { + Sql *string `json:"sql,omitempty"` + SqlId *int32 `json:"sql_id,omitempty"` + Args []Value `json:"args,omitempty"` + NamedArgs []NamedArg `json:"named_args,omitempty"` + WantRows bool `json:"want_rows"` + ReplicationIndex *uint64 `json:"replication_index,omitempty"` +} + +type NamedArg struct { + Name string `json:"name"` + Value Value `json:"value"` +} + +func (s *Stmt) AddArgs(params shared.Params) error { + if len(params.Named()) > 0 { + return s.AddNamedArgs(params.Named()) + } else { + return s.AddPositionalArgs(params.Positional()) + } +} + +func (s *Stmt) AddPositionalArgs(args []any) error { + argValues := make([]Value, len(args)) + for idx := range args { + var err error + if argValues[idx], err = ToValue(args[idx]); err != nil { + return err + } + } + s.Args = argValues + return nil +} + +func (s *Stmt) AddNamedArgs(args map[string]any) error { + argValues := make([]NamedArg, len(args)) + idx := 0 + for key, value := range args { + var err error + var v Value + if v, err = ToValue(value); err != nil { + return err + } + argValues[idx] = NamedArg{ + Name: key, + Value: v, + } + idx++ + } + s.NamedArgs = argValues + return nil +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stmt_result.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stmt_result.go index 8f32aec45c..8fe0e5b46b 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stmt_result.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stmt_result.go @@ -1,65 +1,65 @@ -package hrana - -import ( - "encoding/json" - "fmt" - "strconv" -) - -type Column struct { - Name *string `json:"name"` - Type *string `json:"decltype"` -} - -type StmtResult struct { - Cols []Column `json:"cols"` - Rows [][]Value `json:"rows"` - AffectedRowCount int32 `json:"affected_row_count"` - LastInsertRowId *string `json:"last_insert_rowid"` - ReplicationIndex *uint64 `json:"replication_index"` -} - -func (r *StmtResult) GetLastInsertRowId() int64 { - if r.LastInsertRowId != nil { - if integer, err := strconv.ParseInt(*r.LastInsertRowId, 10, 64); err == nil { - return integer - } - } - return 0 -} - -func (r *StmtResult) UnmarshalJSON(data []byte) error { - type Alias StmtResult - aux := &struct { - ReplicationIndex interface{} `json:"replication_index,omitempty"` - *Alias - }{ - Alias: (*Alias)(r), - } - - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - - if aux.ReplicationIndex == nil { - return nil - } - - switch v := aux.ReplicationIndex.(type) { - case float64: - repIndex := uint64(v) - r.ReplicationIndex = &repIndex - case string: - if v == "" { - return nil - } - repIndex, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return err - } - r.ReplicationIndex = &repIndex - default: - return fmt.Errorf("invalid type for replication index: %T", v) - } - return nil -} +package hrana + +import ( + "encoding/json" + "fmt" + "strconv" +) + +type Column struct { + Name *string `json:"name"` + Type *string `json:"decltype"` +} + +type StmtResult struct { + Cols []Column `json:"cols"` + Rows [][]Value `json:"rows"` + AffectedRowCount int32 `json:"affected_row_count"` + LastInsertRowId *string `json:"last_insert_rowid"` + 
ReplicationIndex *uint64 `json:"replication_index"` +} + +func (r *StmtResult) GetLastInsertRowId() int64 { + if r.LastInsertRowId != nil { + if integer, err := strconv.ParseInt(*r.LastInsertRowId, 10, 64); err == nil { + return integer + } + } + return 0 +} + +func (r *StmtResult) UnmarshalJSON(data []byte) error { + type Alias StmtResult + aux := &struct { + ReplicationIndex interface{} `json:"replication_index,omitempty"` + *Alias + }{ + Alias: (*Alias)(r), + } + + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + if aux.ReplicationIndex == nil { + return nil + } + + switch v := aux.ReplicationIndex.(type) { + case float64: + repIndex := uint64(v) + r.ReplicationIndex = &repIndex + case string: + if v == "" { + return nil + } + repIndex, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return err + } + r.ReplicationIndex = &repIndex + default: + return fmt.Errorf("invalid type for replication index: %T", v) + } + return nil +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stream_request.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stream_request.go index 5d27b44697..c077254e82 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stream_request.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stream_request.go @@ -1,63 +1,63 @@ -package hrana - -import ( - "github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared" -) - -type StreamRequest struct { - Type string `json:"type"` - Stmt *Stmt `json:"stmt,omitempty"` - Batch *Batch `json:"batch,omitempty"` - Sql *string `json:"sql,omitempty"` - SqlId *int32 `json:"sql_id,omitempty"` -} - -func CloseStream() StreamRequest { - return StreamRequest{Type: "close"} -} - -func ExecuteStream(sql string, params shared.Params, wantRows bool) (*StreamRequest, error) { - stmt := &Stmt{ - Sql: &sql, - WantRows: wantRows, - } - if err := stmt.AddArgs(params); err != nil { - return nil, err - } - return &StreamRequest{Type: "execute", Stmt: stmt}, nil -} - -func ExecuteStoredStream(sqlId int32, params shared.Params, wantRows bool) (*StreamRequest, error) { - stmt := &Stmt{ - SqlId: &sqlId, - WantRows: wantRows, - } - if err := stmt.AddArgs(params); err != nil { - return nil, err - } - return &StreamRequest{Type: "execute", Stmt: stmt}, nil -} - -func BatchStream(sqls []string, params []shared.Params, wantRows bool) (*StreamRequest, error) { - batch := &Batch{} - for idx, sql := range sqls { - s := sql - stmt := &Stmt{ - Sql: &s, - WantRows: wantRows, - } - if err := stmt.AddArgs(params[idx]); err != nil { - return nil, err - } - batch.Add(*stmt) - } - return &StreamRequest{Type: "batch", Batch: batch}, nil -} - -func StoreSqlStream(sql string, sqlId int32) StreamRequest { - return StreamRequest{Type: "store_sql", Sql: &sql, SqlId: &sqlId} -} - -func CloseStoredSqlStream(sqlId int32) StreamRequest { - return StreamRequest{Type: "close_sql", SqlId: &sqlId} -} +package hrana + +import ( + "github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared" +) + +type StreamRequest struct { + Type string `json:"type"` + Stmt *Stmt `json:"stmt,omitempty"` + Batch *Batch `json:"batch,omitempty"` + Sql *string `json:"sql,omitempty"` + SqlId *int32 `json:"sql_id,omitempty"` +} + +func CloseStream() StreamRequest { + return StreamRequest{Type: "close"} +} + +func ExecuteStream(sql string, params shared.Params, wantRows bool) (*StreamRequest, error) { + stmt := &Stmt{ + Sql: &sql, + WantRows: 
wantRows, + } + if err := stmt.AddArgs(params); err != nil { + return nil, err + } + return &StreamRequest{Type: "execute", Stmt: stmt}, nil +} + +func ExecuteStoredStream(sqlId int32, params shared.Params, wantRows bool) (*StreamRequest, error) { + stmt := &Stmt{ + SqlId: &sqlId, + WantRows: wantRows, + } + if err := stmt.AddArgs(params); err != nil { + return nil, err + } + return &StreamRequest{Type: "execute", Stmt: stmt}, nil +} + +func BatchStream(sqls []string, params []shared.Params, wantRows bool) (*StreamRequest, error) { + batch := &Batch{} + for idx, sql := range sqls { + s := sql + stmt := &Stmt{ + Sql: &s, + WantRows: wantRows, + } + if err := stmt.AddArgs(params[idx]); err != nil { + return nil, err + } + batch.Add(*stmt) + } + return &StreamRequest{Type: "batch", Batch: batch}, nil +} + +func StoreSqlStream(sql string, sqlId int32) StreamRequest { + return StreamRequest{Type: "store_sql", Sql: &sql, SqlId: &sqlId} +} + +func CloseStoredSqlStream(sqlId int32) StreamRequest { + return StreamRequest{Type: "close_sql", SqlId: &sqlId} +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stream_result.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stream_result.go index 0028d657bd..2099cedeaf 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stream_result.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/stream_result.go @@ -1,52 +1,52 @@ -package hrana - -import ( - "encoding/json" - "errors" - "fmt" -) - -type StreamResult struct { - Type string `json:"type"` - Response *StreamResponse `json:"response,omitempty"` - Error *Error `json:"error,omitempty"` -} - -type StreamResponse struct { - Type string `json:"type"` - Result json.RawMessage `json:"result,omitempty"` -} - -func (r *StreamResponse) ExecuteResult() (*StmtResult, error) { - if r.Type != "execute" { - return nil, fmt.Errorf("invalid response type: %s", r.Type) - } - - var res StmtResult - if err := json.Unmarshal(r.Result, &res); err != nil { - return nil, err - } - return &res, nil -} - -func (r *StreamResponse) BatchResult() (*BatchResult, error) { - if r.Type != "batch" { - return nil, fmt.Errorf("invalid response type: %s", r.Type) - } - - var res BatchResult - if err := json.Unmarshal(r.Result, &res); err != nil { - return nil, err - } - for _, e := range res.StepErrors { - if e != nil { - return nil, errors.New(e.Message) - } - } - return &res, nil -} - -type Error struct { - Message string `json:"message"` - Code *string `json:"code,omitempty"` -} +package hrana + +import ( + "encoding/json" + "errors" + "fmt" +) + +type StreamResult struct { + Type string `json:"type"` + Response *StreamResponse `json:"response,omitempty"` + Error *Error `json:"error,omitempty"` +} + +type StreamResponse struct { + Type string `json:"type"` + Result json.RawMessage `json:"result,omitempty"` +} + +func (r *StreamResponse) ExecuteResult() (*StmtResult, error) { + if r.Type != "execute" { + return nil, fmt.Errorf("invalid response type: %s", r.Type) + } + + var res StmtResult + if err := json.Unmarshal(r.Result, &res); err != nil { + return nil, err + } + return &res, nil +} + +func (r *StreamResponse) BatchResult() (*BatchResult, error) { + if r.Type != "batch" { + return nil, fmt.Errorf("invalid response type: %s", r.Type) + } + + var res BatchResult + if err := json.Unmarshal(r.Result, &res); err != nil { + return nil, err + } + for _, e := range res.StepErrors { + if e != nil { + return nil, 
errors.New(e.Message) + } + } + return &res, nil +} + +type Error struct { + Message string `json:"message"` + Code *string `json:"code,omitempty"` +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/value.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/value.go index eed7da6b94..a2c162fa56 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/value.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/hrana/value.go @@ -1,85 +1,85 @@ -package hrana - -import ( - "encoding/base64" - "fmt" - "strconv" - "strings" - "time" -) - -type Value struct { - Type string `json:"type"` - Value any `json:"value,omitempty"` - Base64 string `json:"base64,omitempty"` -} - -func (v Value) ToValue(columnType *string) any { - if v.Type == "blob" { - bytes, err := base64.StdEncoding.WithPadding(base64.NoPadding).DecodeString(v.Base64) - if err != nil { - return nil - } - return bytes - } else if v.Type == "integer" { - integer, err := strconv.ParseInt(v.Value.(string), 10, 64) - if err != nil { - return nil - } - return integer - } else if columnType != nil { - if (strings.ToLower(*columnType) == "timestamp" || strings.ToLower(*columnType) == "datetime") && v.Type == "text" { - for _, format := range []string{ - "2006-01-02 15:04:05.999999999-07:00", - "2006-01-02T15:04:05.999999999-07:00", - "2006-01-02 15:04:05.999999999", - "2006-01-02T15:04:05.999999999", - "2006-01-02 15:04:05", - "2006-01-02T15:04:05", - "2006-01-02 15:04", - "2006-01-02T15:04", - "2006-01-02", - } { - if t, err := time.ParseInLocation(format, v.Value.(string), time.UTC); err == nil { - return t - } - } - } - } - - return v.Value -} - -func ToValue(v any) (Value, error) { - var res Value - if v == nil { - res.Type = "null" - } else if integer, ok := v.(int64); ok { - res.Type = "integer" - res.Value = strconv.FormatInt(integer, 10) - } else if integer, ok := v.(int); ok { - res.Type = "integer" - res.Value = strconv.FormatInt(int64(integer), 10) - } else if text, ok := v.(string); ok { - res.Type = "text" - res.Value = text - } else if blob, ok := v.([]byte); ok { - res.Type = "blob" - res.Base64 = base64.StdEncoding.WithPadding(base64.NoPadding).EncodeToString(blob) - } else if float, ok := v.(float64); ok { - res.Type = "float" - res.Value = float - } else if t, ok := v.(time.Time); ok { - res.Type = "text" - res.Value = t.Format("2006-01-02 15:04:05.999999999-07:00") - } else if t, ok := v.(bool); ok { - res.Type = "integer" - res.Value = "0" - if t { - res.Value = "1" - } - } else { - return res, fmt.Errorf("unsupported value type: %s", v) - } - return res, nil -} +package hrana + +import ( + "encoding/base64" + "fmt" + "strconv" + "strings" + "time" +) + +type Value struct { + Type string `json:"type"` + Value any `json:"value,omitempty"` + Base64 string `json:"base64,omitempty"` +} + +func (v Value) ToValue(columnType *string) any { + if v.Type == "blob" { + bytes, err := base64.StdEncoding.WithPadding(base64.NoPadding).DecodeString(v.Base64) + if err != nil { + return nil + } + return bytes + } else if v.Type == "integer" { + integer, err := strconv.ParseInt(v.Value.(string), 10, 64) + if err != nil { + return nil + } + return integer + } else if columnType != nil { + if (strings.ToLower(*columnType) == "timestamp" || strings.ToLower(*columnType) == "datetime") && v.Type == "text" { + for _, format := range []string{ + "2006-01-02 15:04:05.999999999-07:00", + "2006-01-02T15:04:05.999999999-07:00", + "2006-01-02 
15:04:05.999999999", + "2006-01-02T15:04:05.999999999", + "2006-01-02 15:04:05", + "2006-01-02T15:04:05", + "2006-01-02 15:04", + "2006-01-02T15:04", + "2006-01-02", + } { + if t, err := time.ParseInLocation(format, v.Value.(string), time.UTC); err == nil { + return t + } + } + } + } + + return v.Value +} + +func ToValue(v any) (Value, error) { + var res Value + if v == nil { + res.Type = "null" + } else if integer, ok := v.(int64); ok { + res.Type = "integer" + res.Value = strconv.FormatInt(integer, 10) + } else if integer, ok := v.(int); ok { + res.Type = "integer" + res.Value = strconv.FormatInt(int64(integer), 10) + } else if text, ok := v.(string); ok { + res.Type = "text" + res.Value = text + } else if blob, ok := v.([]byte); ok { + res.Type = "blob" + res.Base64 = base64.StdEncoding.WithPadding(base64.NoPadding).EncodeToString(blob) + } else if float, ok := v.(float64); ok { + res.Type = "float" + res.Value = float + } else if t, ok := v.(time.Time); ok { + res.Type = "text" + res.Value = t.Format("2006-01-02 15:04:05.999999999-07:00") + } else if t, ok := v.(bool); ok { + res.Type = "integer" + res.Value = "0" + if t { + res.Value = "1" + } + } else { + return res, fmt.Errorf("unsupported value type: %s", v) + } + return res, nil +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/driver.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/driver.go index 52d100e9a9..b46b174f71 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/driver.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/driver.go @@ -1,11 +1,11 @@ -package http - -import ( - "database/sql/driver" - - "github.com/tursodatabase/libsql-client-go/libsql/internal/http/hranaV2" -) - -func Connect(url, jwt, host string) driver.Conn { - return hranaV2.Connect(url, jwt, host) -} +package http + +import ( + "database/sql/driver" + + "github.com/tursodatabase/libsql-client-go/libsql/internal/http/hranaV2" +) + +func Connect(url, jwt, host string) driver.Conn { + return hranaV2.Connect(url, jwt, host) +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/hranaV2/hranaV2.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/hranaV2/hranaV2.go index 672eb4c848..12367ecc9b 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/hranaV2/hranaV2.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/hranaV2/hranaV2.go @@ -1,476 +1,476 @@ -package hranaV2 - -import ( - "bytes" - "context" - "database/sql" - "database/sql/driver" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - net_url "net/url" - "runtime/debug" - "strings" - "time" - - "github.com/tursodatabase/libsql-client-go/libsql/internal/hrana" - "github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared" -) - -var commitHash string - -func init() { - if info, ok := debug.ReadBuildInfo(); ok { - for _, module := range info.Deps { - if module.Path == "github.com/tursodatabase/libsql-client-go" { - parts := strings.Split(module.Version, "-") - if len(parts) == 3 { - commitHash = parts[2][:6] - return - } - } - } - } - commitHash = "unknown" -} - -func Connect(url, jwt, host string) driver.Conn { - return &hranaV2Conn{url, jwt, host, "", false, 0} -} - -type hranaV2Stmt struct { - conn *hranaV2Conn - numInput int - sql string -} - -func (s *hranaV2Stmt) Close() error { - return nil -} - -func (s *hranaV2Stmt) NumInput() int { - return 
s.numInput -} - -func convertToNamed(args []driver.Value) []driver.NamedValue { - if len(args) == 0 { - return nil - } - var result []driver.NamedValue - for idx := range args { - result = append(result, driver.NamedValue{Ordinal: idx, Value: args[idx]}) - } - return result -} - -func (s *hranaV2Stmt) Exec(args []driver.Value) (driver.Result, error) { - return s.ExecContext(context.Background(), convertToNamed(args)) -} - -func (s *hranaV2Stmt) Query(args []driver.Value) (driver.Rows, error) { - return s.QueryContext(context.Background(), convertToNamed(args)) -} - -func (s *hranaV2Stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { - return s.conn.ExecContext(ctx, s.sql, args) -} - -func (s *hranaV2Stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { - return s.conn.QueryContext(ctx, s.sql, args) -} - -type hranaV2Conn struct { - url string - jwt string - host string - baton string - streamClosed bool - replicationIndex uint64 -} - -func (h *hranaV2Conn) Ping() error { - return h.PingContext(context.Background()) -} - -func (h *hranaV2Conn) PingContext(ctx context.Context) error { - _, err := h.executeStmt(ctx, "SELECT 1", nil, false) - return err -} - -func (h *hranaV2Conn) Prepare(query string) (driver.Stmt, error) { - return h.PrepareContext(context.Background(), query) -} - -func (h *hranaV2Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { - stmts, paramInfos, err := shared.ParseStatement(query) - if err != nil { - return nil, err - } - if len(stmts) != 1 { - return nil, fmt.Errorf("only one statement is supported got %d", len(stmts)) - } - numInput := -1 - if len(paramInfos[0].NamedParameters) == 0 { - numInput = paramInfos[0].PositionalParametersCount - } - return &hranaV2Stmt{h, numInput, query}, nil -} - -func (h *hranaV2Conn) Close() error { - if h.baton != "" { - go func(baton, url, jwt, host string) { - msg := hrana.PipelineRequest{Baton: baton} - msg.Add(hrana.CloseStream()) - _, _, _ = sendPipelineRequest(context.Background(), &msg, url, jwt, host) - }(h.baton, h.url, h.jwt, h.host) - } - return nil -} - -func (h *hranaV2Conn) Begin() (driver.Tx, error) { - return h.BeginTx(context.Background(), driver.TxOptions{}) -} - -type hranaV2Tx struct { - conn *hranaV2Conn -} - -func (h hranaV2Tx) Commit() error { - _, err := h.conn.ExecContext(context.Background(), "COMMIT", nil) - return err -} - -func (h hranaV2Tx) Rollback() error { - _, err := h.conn.ExecContext(context.Background(), "ROLLBACK", nil) - return err -} - -func (h *hranaV2Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { - if opts.ReadOnly { - return nil, fmt.Errorf("read only transactions are not supported") - } - if opts.Isolation != driver.IsolationLevel(sql.LevelDefault) { - return nil, fmt.Errorf("isolation level %d is not supported", opts.Isolation) - } - _, err := h.ExecContext(ctx, "BEGIN", nil) - if err != nil { - return nil, err - } - return &hranaV2Tx{h}, nil -} - -func (h *hranaV2Conn) sendPipelineRequest(ctx context.Context, msg *hrana.PipelineRequest, streamClose bool) (*hrana.PipelineResponse, error) { - if h.streamClosed { - // If the stream is closed, we can't send any more requests using this connection. 
- return nil, fmt.Errorf("stream is closed: %w", driver.ErrBadConn) - } - if h.baton != "" { - msg.Baton = h.baton - } - if h.replicationIndex > 0 { - addReplicationIndex(msg, h.replicationIndex) - } - result, streamClosed, err := sendPipelineRequest(ctx, msg, h.url, h.jwt, h.host) - if streamClosed { - h.streamClosed = true - } - if err != nil { - return nil, err - } - h.baton = result.Baton - if result.Baton == "" && !streamClose { - // We need to remember that the stream is closed so we don't try to send any more requests using this connection. - h.streamClosed = true - } - if result.BaseUrl != "" { - h.url = result.BaseUrl - } - if idx := getReplicationIndex(&result); idx > h.replicationIndex { - h.replicationIndex = idx - } - return &result, nil -} - -func addReplicationIndex(msg *hrana.PipelineRequest, replicationIndex uint64) { - for i := range msg.Requests { - if msg.Requests[i].Stmt != nil && msg.Requests[i].Stmt.ReplicationIndex == nil { - msg.Requests[i].Stmt.ReplicationIndex = &replicationIndex - } else if msg.Requests[i].Batch != nil && msg.Requests[i].Batch.ReplicationIndex == nil { - msg.Requests[i].Batch.ReplicationIndex = &replicationIndex - } - } -} - -func getReplicationIndex(response *hrana.PipelineResponse) uint64 { - if response == nil || len(response.Results) == 0 { - return 0 - } - var replicationIndex uint64 - for _, result := range response.Results { - if result.Response == nil { - continue - } - if result.Response.Type == "execute" { - if res, err := result.Response.ExecuteResult(); err == nil && res.ReplicationIndex != nil { - if *res.ReplicationIndex > replicationIndex { - replicationIndex = *res.ReplicationIndex - } - } - } else if result.Response.Type == "batch" { - if res, err := result.Response.BatchResult(); err == nil && res.ReplicationIndex != nil { - if *res.ReplicationIndex > replicationIndex { - replicationIndex = *res.ReplicationIndex - } - } - } - } - return replicationIndex -} - -func sendPipelineRequest(ctx context.Context, msg *hrana.PipelineRequest, url string, jwt string, host string) (result hrana.PipelineResponse, streamClosed bool, err error) { - reqBody, err := json.Marshal(msg) - if err != nil { - return hrana.PipelineResponse{}, false, err - } - ctx, cancel := context.WithTimeout(ctx, 60*time.Second) - defer cancel() - pipelineURL, err := net_url.JoinPath(url, "/v2/pipeline") - if err != nil { - return hrana.PipelineResponse{}, false, err - } - req, err := http.NewRequestWithContext(ctx, "POST", pipelineURL, bytes.NewReader(reqBody)) - if err != nil { - return hrana.PipelineResponse{}, false, err - } - if len(jwt) > 0 { - req.Header.Set("Authorization", "Bearer "+jwt) - } - req.Header.Set("x-libsql-client-version", "libsql-remote-go-"+commitHash) - req.Host = host - resp, err := http.DefaultClient.Do(req) - if err != nil { - return hrana.PipelineResponse{}, false, err - } - defer resp.Body.Close() - body, err := io.ReadAll(resp.Body) - if err != nil { - return hrana.PipelineResponse{}, false, err - } - if resp.StatusCode != http.StatusOK { - // We need to remember that the stream is closed so we don't try to send any more requests using this connection. 
- var serverError struct { - Error string `json:"error"` - } - if err := json.Unmarshal(body, &serverError); err == nil { - return hrana.PipelineResponse{}, true, fmt.Errorf("error code %d: %s", resp.StatusCode, serverError.Error) - } - var errResponse hrana.Error - if err := json.Unmarshal(body, &errResponse); err == nil { - if errResponse.Code != nil { - if *errResponse.Code == "STREAM_EXPIRED" { - return hrana.PipelineResponse{}, true, fmt.Errorf("error code %s: %s\n%w", *errResponse.Code, errResponse.Message, driver.ErrBadConn) - } else { - return hrana.PipelineResponse{}, true, fmt.Errorf("error code %s: %s", *errResponse.Code, errResponse.Message) - } - } - return hrana.PipelineResponse{}, true, errors.New(errResponse.Message) - } - return hrana.PipelineResponse{}, true, fmt.Errorf("error code %d: %s", resp.StatusCode, string(body)) - } - if err = json.Unmarshal(body, &result); err != nil { - return hrana.PipelineResponse{}, false, err - } - return result, false, nil -} - -func (h *hranaV2Conn) executeStmt(ctx context.Context, query string, args []driver.NamedValue, wantRows bool) (*hrana.PipelineResponse, error) { - stmts, params, err := shared.ParseStatementAndArgs(query, args) - if err != nil { - return nil, fmt.Errorf("failed to execute SQL: %s\n%w", query, err) - } - msg := &hrana.PipelineRequest{} - if len(stmts) == 1 { - executeStream, err := hrana.ExecuteStream(stmts[0], params[0], wantRows) - if err != nil { - return nil, fmt.Errorf("failed to execute SQL: %s\n%w", query, err) - } - msg.Add(*executeStream) - } else { - batchStream, err := hrana.BatchStream(stmts, params, wantRows) - if err != nil { - return nil, fmt.Errorf("failed to execute SQL: %s\n%w", query, err) - } - msg.Add(*batchStream) - } - - result, err := h.sendPipelineRequest(ctx, msg, false) - if err != nil { - return nil, fmt.Errorf("failed to execute SQL: %s\n%w", query, err) - } - - if result.Results[0].Error != nil { - return nil, fmt.Errorf("failed to execute SQL: %s\n%s", query, result.Results[0].Error.Message) - } - if result.Results[0].Response == nil { - return nil, fmt.Errorf("failed to execute SQL: %s\n%s", query, "no response received") - } - return result, nil -} - -func (h *hranaV2Conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { - result, err := h.executeStmt(ctx, query, args, false) - if err != nil { - return nil, err - } - switch result.Results[0].Response.Type { - case "execute": - res, err := result.Results[0].Response.ExecuteResult() - if err != nil { - return nil, err - } - return shared.NewResult(res.GetLastInsertRowId(), int64(res.AffectedRowCount)), nil - case "batch": - res, err := result.Results[0].Response.BatchResult() - if err != nil { - return nil, err - } - lastInsertRowId := int64(0) - affectedRowCount := int64(0) - for _, r := range res.StepResults { - rowId := r.GetLastInsertRowId() - if rowId > 0 { - lastInsertRowId = rowId - } - affectedRowCount += int64(r.AffectedRowCount) - } - return shared.NewResult(lastInsertRowId, affectedRowCount), nil - default: - return nil, fmt.Errorf("failed to execute SQL: %s\n%s", query, "unknown response type") - } -} - -type StmtResultRowsProvider struct { - r *hrana.StmtResult -} - -func (p *StmtResultRowsProvider) SetsCount() int { - return 1 -} - -func (p *StmtResultRowsProvider) RowsCount(setIdx int) int { - if setIdx != 0 { - return 0 - } - return len(p.r.Rows) -} - -func (p *StmtResultRowsProvider) Columns(setIdx int) []string { - if setIdx != 0 { - return nil - } - res := 
make([]string, len(p.r.Cols)) - for i, c := range p.r.Cols { - if c.Name != nil { - res[i] = *c.Name - } - } - return res -} - -func (p *StmtResultRowsProvider) FieldValue(setIdx, rowIdx, colIdx int) driver.Value { - if setIdx != 0 { - return nil - } - return p.r.Rows[rowIdx][colIdx].ToValue(p.r.Cols[colIdx].Type) -} - -func (p *StmtResultRowsProvider) Error(setIdx int) string { - return "" -} - -func (p *StmtResultRowsProvider) HasResult(setIdx int) bool { - return setIdx == 0 -} - -type BatchResultRowsProvider struct { - r *hrana.BatchResult -} - -func (p *BatchResultRowsProvider) SetsCount() int { - return len(p.r.StepResults) -} - -func (p *BatchResultRowsProvider) RowsCount(setIdx int) int { - if setIdx >= len(p.r.StepResults) || p.r.StepResults[setIdx] == nil { - return 0 - } - return len(p.r.StepResults[setIdx].Rows) -} - -func (p *BatchResultRowsProvider) Columns(setIdx int) []string { - if setIdx >= len(p.r.StepResults) || p.r.StepResults[setIdx] == nil { - return nil - } - res := make([]string, len(p.r.StepResults[setIdx].Cols)) - for i, c := range p.r.StepResults[setIdx].Cols { - if c.Name != nil { - res[i] = *c.Name - } - } - return res -} - -func (p *BatchResultRowsProvider) FieldValue(setIdx, rowIdx, colIdx int) driver.Value { - if setIdx >= len(p.r.StepResults) || p.r.StepResults[setIdx] == nil { - return nil - } - return p.r.StepResults[setIdx].Rows[rowIdx][colIdx].ToValue(p.r.StepResults[setIdx].Cols[colIdx].Type) -} - -func (p *BatchResultRowsProvider) Error(setIdx int) string { - if setIdx >= len(p.r.StepErrors) || p.r.StepErrors[setIdx] == nil { - return "" - } - return p.r.StepErrors[setIdx].Message -} - -func (p *BatchResultRowsProvider) HasResult(setIdx int) bool { - return setIdx < len(p.r.StepResults) && p.r.StepResults[setIdx] != nil -} - -func (h *hranaV2Conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { - result, err := h.executeStmt(ctx, query, args, true) - if err != nil { - return nil, err - } - switch result.Results[0].Response.Type { - case "execute": - res, err := result.Results[0].Response.ExecuteResult() - if err != nil { - return nil, err - } - return shared.NewRows(&StmtResultRowsProvider{res}), nil - case "batch": - res, err := result.Results[0].Response.BatchResult() - if err != nil { - return nil, err - } - return shared.NewRows(&BatchResultRowsProvider{res}), nil - default: - return nil, fmt.Errorf("failed to execute SQL: %s\n%s", query, "unknown response type") - } -} - -func (h *hranaV2Conn) ResetSession(ctx context.Context) error { - if h.baton != "" { - go func(baton, url, jwt, host string) { - msg := hrana.PipelineRequest{Baton: baton} - msg.Add(hrana.CloseStream()) - _, _, _ = sendPipelineRequest(context.Background(), &msg, url, jwt, host) - }(h.baton, h.url, h.jwt, h.host) - h.baton = "" - } - return nil -} +package hranaV2 + +import ( + "bytes" + "context" + "database/sql" + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + net_url "net/url" + "runtime/debug" + "strings" + "time" + + "github.com/tursodatabase/libsql-client-go/libsql/internal/hrana" + "github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared" +) + +var commitHash string + +func init() { + if info, ok := debug.ReadBuildInfo(); ok { + for _, module := range info.Deps { + if module.Path == "github.com/tursodatabase/libsql-client-go" { + parts := strings.Split(module.Version, "-") + if len(parts) == 3 { + commitHash = parts[2][:6] + return + } + } + } + } + commitHash = "unknown" +} + 
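// Hypothetical example (not part of the patch) of the pseudo-version parsing
// in the init above: for a made-up dependency version
// "v0.0.0-20230101000000-abcdef123456", strings.Split on "-" yields three
// parts, and parts[2][:6] is "abcdef", the short commit hash later reported
// in the x-libsql-client-version header; any other shape falls back to
// "unknown".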
+func Connect(url, jwt, host string) driver.Conn { + return &hranaV2Conn{url, jwt, host, "", false, 0} +} + +type hranaV2Stmt struct { + conn *hranaV2Conn + numInput int + sql string +} + +func (s *hranaV2Stmt) Close() error { + return nil +} + +func (s *hranaV2Stmt) NumInput() int { + return s.numInput +} + +func convertToNamed(args []driver.Value) []driver.NamedValue { + if len(args) == 0 { + return nil + } + var result []driver.NamedValue + for idx := range args { + result = append(result, driver.NamedValue{Ordinal: idx, Value: args[idx]}) + } + return result +} + +func (s *hranaV2Stmt) Exec(args []driver.Value) (driver.Result, error) { + return s.ExecContext(context.Background(), convertToNamed(args)) +} + +func (s *hranaV2Stmt) Query(args []driver.Value) (driver.Rows, error) { + return s.QueryContext(context.Background(), convertToNamed(args)) +} + +func (s *hranaV2Stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + return s.conn.ExecContext(ctx, s.sql, args) +} + +func (s *hranaV2Stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + return s.conn.QueryContext(ctx, s.sql, args) +} + +type hranaV2Conn struct { + url string + jwt string + host string + baton string + streamClosed bool + replicationIndex uint64 +} + +func (h *hranaV2Conn) Ping() error { + return h.PingContext(context.Background()) +} + +func (h *hranaV2Conn) PingContext(ctx context.Context) error { + _, err := h.executeStmt(ctx, "SELECT 1", nil, false) + return err +} + +func (h *hranaV2Conn) Prepare(query string) (driver.Stmt, error) { + return h.PrepareContext(context.Background(), query) +} + +func (h *hranaV2Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + stmts, paramInfos, err := shared.ParseStatement(query) + if err != nil { + return nil, err + } + if len(stmts) != 1 { + return nil, fmt.Errorf("only one statement is supported got %d", len(stmts)) + } + numInput := -1 + if len(paramInfos[0].NamedParameters) == 0 { + numInput = paramInfos[0].PositionalParametersCount + } + return &hranaV2Stmt{h, numInput, query}, nil +} + +func (h *hranaV2Conn) Close() error { + if h.baton != "" { + go func(baton, url, jwt, host string) { + msg := hrana.PipelineRequest{Baton: baton} + msg.Add(hrana.CloseStream()) + _, _, _ = sendPipelineRequest(context.Background(), &msg, url, jwt, host) + }(h.baton, h.url, h.jwt, h.host) + } + return nil +} + +func (h *hranaV2Conn) Begin() (driver.Tx, error) { + return h.BeginTx(context.Background(), driver.TxOptions{}) +} + +type hranaV2Tx struct { + conn *hranaV2Conn +} + +func (h hranaV2Tx) Commit() error { + _, err := h.conn.ExecContext(context.Background(), "COMMIT", nil) + return err +} + +func (h hranaV2Tx) Rollback() error { + _, err := h.conn.ExecContext(context.Background(), "ROLLBACK", nil) + return err +} + +func (h *hranaV2Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + if opts.ReadOnly { + return nil, fmt.Errorf("read only transactions are not supported") + } + if opts.Isolation != driver.IsolationLevel(sql.LevelDefault) { + return nil, fmt.Errorf("isolation level %d is not supported", opts.Isolation) + } + _, err := h.ExecContext(ctx, "BEGIN", nil) + if err != nil { + return nil, err + } + return &hranaV2Tx{h}, nil +} + +func (h *hranaV2Conn) sendPipelineRequest(ctx context.Context, msg *hrana.PipelineRequest, streamClose bool) (*hrana.PipelineResponse, error) { + if h.streamClosed { + // If the stream is closed, we can't send any more requests 
using this connection. + return nil, fmt.Errorf("stream is closed: %w", driver.ErrBadConn) + } + if h.baton != "" { + msg.Baton = h.baton + } + if h.replicationIndex > 0 { + addReplicationIndex(msg, h.replicationIndex) + } + result, streamClosed, err := sendPipelineRequest(ctx, msg, h.url, h.jwt, h.host) + if streamClosed { + h.streamClosed = true + } + if err != nil { + return nil, err + } + h.baton = result.Baton + if result.Baton == "" && !streamClose { + // We need to remember that the stream is closed so we don't try to send any more requests using this connection. + h.streamClosed = true + } + if result.BaseUrl != "" { + h.url = result.BaseUrl + } + if idx := getReplicationIndex(&result); idx > h.replicationIndex { + h.replicationIndex = idx + } + return &result, nil +} + +func addReplicationIndex(msg *hrana.PipelineRequest, replicationIndex uint64) { + for i := range msg.Requests { + if msg.Requests[i].Stmt != nil && msg.Requests[i].Stmt.ReplicationIndex == nil { + msg.Requests[i].Stmt.ReplicationIndex = &replicationIndex + } else if msg.Requests[i].Batch != nil && msg.Requests[i].Batch.ReplicationIndex == nil { + msg.Requests[i].Batch.ReplicationIndex = &replicationIndex + } + } +} + +func getReplicationIndex(response *hrana.PipelineResponse) uint64 { + if response == nil || len(response.Results) == 0 { + return 0 + } + var replicationIndex uint64 + for _, result := range response.Results { + if result.Response == nil { + continue + } + if result.Response.Type == "execute" { + if res, err := result.Response.ExecuteResult(); err == nil && res.ReplicationIndex != nil { + if *res.ReplicationIndex > replicationIndex { + replicationIndex = *res.ReplicationIndex + } + } + } else if result.Response.Type == "batch" { + if res, err := result.Response.BatchResult(); err == nil && res.ReplicationIndex != nil { + if *res.ReplicationIndex > replicationIndex { + replicationIndex = *res.ReplicationIndex + } + } + } + } + return replicationIndex +} + +func sendPipelineRequest(ctx context.Context, msg *hrana.PipelineRequest, url string, jwt string, host string) (result hrana.PipelineResponse, streamClosed bool, err error) { + reqBody, err := json.Marshal(msg) + if err != nil { + return hrana.PipelineResponse{}, false, err + } + ctx, cancel := context.WithTimeout(ctx, 60*time.Second) + defer cancel() + pipelineURL, err := net_url.JoinPath(url, "/v2/pipeline") + if err != nil { + return hrana.PipelineResponse{}, false, err + } + req, err := http.NewRequestWithContext(ctx, "POST", pipelineURL, bytes.NewReader(reqBody)) + if err != nil { + return hrana.PipelineResponse{}, false, err + } + if len(jwt) > 0 { + req.Header.Set("Authorization", "Bearer "+jwt) + } + req.Header.Set("x-libsql-client-version", "libsql-remote-go-"+commitHash) + req.Host = host + resp, err := http.DefaultClient.Do(req) + if err != nil { + return hrana.PipelineResponse{}, false, err + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return hrana.PipelineResponse{}, false, err + } + if resp.StatusCode != http.StatusOK { + // We need to remember that the stream is closed so we don't try to send any more requests using this connection. 
+ var serverError struct { + Error string `json:"error"` + } + if err := json.Unmarshal(body, &serverError); err == nil { + return hrana.PipelineResponse{}, true, fmt.Errorf("error code %d: %s", resp.StatusCode, serverError.Error) + } + var errResponse hrana.Error + if err := json.Unmarshal(body, &errResponse); err == nil { + if errResponse.Code != nil { + if *errResponse.Code == "STREAM_EXPIRED" { + return hrana.PipelineResponse{}, true, fmt.Errorf("error code %s: %s\n%w", *errResponse.Code, errResponse.Message, driver.ErrBadConn) + } else { + return hrana.PipelineResponse{}, true, fmt.Errorf("error code %s: %s", *errResponse.Code, errResponse.Message) + } + } + return hrana.PipelineResponse{}, true, errors.New(errResponse.Message) + } + return hrana.PipelineResponse{}, true, fmt.Errorf("error code %d: %s", resp.StatusCode, string(body)) + } + if err = json.Unmarshal(body, &result); err != nil { + return hrana.PipelineResponse{}, false, err + } + return result, false, nil +} + +func (h *hranaV2Conn) executeStmt(ctx context.Context, query string, args []driver.NamedValue, wantRows bool) (*hrana.PipelineResponse, error) { + stmts, params, err := shared.ParseStatementAndArgs(query, args) + if err != nil { + return nil, fmt.Errorf("failed to execute SQL: %s\n%w", query, err) + } + msg := &hrana.PipelineRequest{} + if len(stmts) == 1 { + executeStream, err := hrana.ExecuteStream(stmts[0], params[0], wantRows) + if err != nil { + return nil, fmt.Errorf("failed to execute SQL: %s\n%w", query, err) + } + msg.Add(*executeStream) + } else { + batchStream, err := hrana.BatchStream(stmts, params, wantRows) + if err != nil { + return nil, fmt.Errorf("failed to execute SQL: %s\n%w", query, err) + } + msg.Add(*batchStream) + } + + result, err := h.sendPipelineRequest(ctx, msg, false) + if err != nil { + return nil, fmt.Errorf("failed to execute SQL: %s\n%w", query, err) + } + + if result.Results[0].Error != nil { + return nil, fmt.Errorf("failed to execute SQL: %s\n%s", query, result.Results[0].Error.Message) + } + if result.Results[0].Response == nil { + return nil, fmt.Errorf("failed to execute SQL: %s\n%s", query, "no response received") + } + return result, nil +} + +func (h *hranaV2Conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + result, err := h.executeStmt(ctx, query, args, false) + if err != nil { + return nil, err + } + switch result.Results[0].Response.Type { + case "execute": + res, err := result.Results[0].Response.ExecuteResult() + if err != nil { + return nil, err + } + return shared.NewResult(res.GetLastInsertRowId(), int64(res.AffectedRowCount)), nil + case "batch": + res, err := result.Results[0].Response.BatchResult() + if err != nil { + return nil, err + } + lastInsertRowId := int64(0) + affectedRowCount := int64(0) + for _, r := range res.StepResults { + rowId := r.GetLastInsertRowId() + if rowId > 0 { + lastInsertRowId = rowId + } + affectedRowCount += int64(r.AffectedRowCount) + } + return shared.NewResult(lastInsertRowId, affectedRowCount), nil + default: + return nil, fmt.Errorf("failed to execute SQL: %s\n%s", query, "unknown response type") + } +} + +type StmtResultRowsProvider struct { + r *hrana.StmtResult +} + +func (p *StmtResultRowsProvider) SetsCount() int { + return 1 +} + +func (p *StmtResultRowsProvider) RowsCount(setIdx int) int { + if setIdx != 0 { + return 0 + } + return len(p.r.Rows) +} + +func (p *StmtResultRowsProvider) Columns(setIdx int) []string { + if setIdx != 0 { + return nil + } + res := 
make([]string, len(p.r.Cols)) + for i, c := range p.r.Cols { + if c.Name != nil { + res[i] = *c.Name + } + } + return res +} + +func (p *StmtResultRowsProvider) FieldValue(setIdx, rowIdx, colIdx int) driver.Value { + if setIdx != 0 { + return nil + } + return p.r.Rows[rowIdx][colIdx].ToValue(p.r.Cols[colIdx].Type) +} + +func (p *StmtResultRowsProvider) Error(setIdx int) string { + return "" +} + +func (p *StmtResultRowsProvider) HasResult(setIdx int) bool { + return setIdx == 0 +} + +type BatchResultRowsProvider struct { + r *hrana.BatchResult +} + +func (p *BatchResultRowsProvider) SetsCount() int { + return len(p.r.StepResults) +} + +func (p *BatchResultRowsProvider) RowsCount(setIdx int) int { + if setIdx >= len(p.r.StepResults) || p.r.StepResults[setIdx] == nil { + return 0 + } + return len(p.r.StepResults[setIdx].Rows) +} + +func (p *BatchResultRowsProvider) Columns(setIdx int) []string { + if setIdx >= len(p.r.StepResults) || p.r.StepResults[setIdx] == nil { + return nil + } + res := make([]string, len(p.r.StepResults[setIdx].Cols)) + for i, c := range p.r.StepResults[setIdx].Cols { + if c.Name != nil { + res[i] = *c.Name + } + } + return res +} + +func (p *BatchResultRowsProvider) FieldValue(setIdx, rowIdx, colIdx int) driver.Value { + if setIdx >= len(p.r.StepResults) || p.r.StepResults[setIdx] == nil { + return nil + } + return p.r.StepResults[setIdx].Rows[rowIdx][colIdx].ToValue(p.r.StepResults[setIdx].Cols[colIdx].Type) +} + +func (p *BatchResultRowsProvider) Error(setIdx int) string { + if setIdx >= len(p.r.StepErrors) || p.r.StepErrors[setIdx] == nil { + return "" + } + return p.r.StepErrors[setIdx].Message +} + +func (p *BatchResultRowsProvider) HasResult(setIdx int) bool { + return setIdx < len(p.r.StepResults) && p.r.StepResults[setIdx] != nil +} + +func (h *hranaV2Conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + result, err := h.executeStmt(ctx, query, args, true) + if err != nil { + return nil, err + } + switch result.Results[0].Response.Type { + case "execute": + res, err := result.Results[0].Response.ExecuteResult() + if err != nil { + return nil, err + } + return shared.NewRows(&StmtResultRowsProvider{res}), nil + case "batch": + res, err := result.Results[0].Response.BatchResult() + if err != nil { + return nil, err + } + return shared.NewRows(&BatchResultRowsProvider{res}), nil + default: + return nil, fmt.Errorf("failed to execute SQL: %s\n%s", query, "unknown response type") + } +} + +func (h *hranaV2Conn) ResetSession(ctx context.Context) error { + if h.baton != "" { + go func(baton, url, jwt, host string) { + msg := hrana.PipelineRequest{Baton: baton} + msg.Add(hrana.CloseStream()) + _, _, _ = sendPipelineRequest(context.Background(), &msg, url, jwt, host) + }(h.baton, h.url, h.jwt, h.host) + h.baton = "" + } + return nil +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/result.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/result.go index 59250a941c..df3c427a36 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/result.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/result.go @@ -1,18 +1,18 @@ -package shared - -type result struct { - id int64 - changes int64 -} - -func NewResult(id, changes int64) *result { - return &result{id: id, changes: changes} -} - -func (r *result) LastInsertId() (int64, error) { - return r.id, nil -} - -func (r *result) 
RowsAffected() (int64, error) { - return r.changes, nil -} +package shared + +type result struct { + id int64 + changes int64 +} + +func NewResult(id, changes int64) *result { + return &result{id: id, changes: changes} +} + +func (r *result) LastInsertId() (int64, error) { + return r.id, nil +} + +func (r *result) RowsAffected() (int64, error) { + return r.changes, nil +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/rows.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/rows.go index 0de54a7fad..c64a81573e 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/rows.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/rows.go @@ -1,69 +1,69 @@ -package shared - -import ( - "database/sql/driver" - "fmt" - "io" -) - -type rowsProvider interface { - SetsCount() int - RowsCount(setIdx int) int - Columns(setIdx int) []string - FieldValue(setIdx, rowIdx int, columnIdx int) driver.Value - Error(setIdx int) string - HasResult(setIdx int) bool -} - -func NewRows(result rowsProvider) driver.Rows { - return &rows{result: result} -} - -type rows struct { - result rowsProvider - currentResultSetIndex int - currentRowIdx int -} - -func (r *rows) Columns() []string { - return r.result.Columns(r.currentResultSetIndex) -} - -func (r *rows) Close() error { - return nil -} - -func (r *rows) Next(dest []driver.Value) error { - if r.currentRowIdx == r.result.RowsCount(r.currentResultSetIndex) { - return io.EOF - } - count := len(r.result.Columns(r.currentResultSetIndex)) - for idx := 0; idx < count; idx++ { - dest[idx] = r.result.FieldValue(r.currentResultSetIndex, r.currentRowIdx, idx) - } - r.currentRowIdx++ - return nil -} - -func (r *rows) HasNextResultSet() bool { - return r.currentResultSetIndex < r.result.SetsCount()-1 -} - -func (r *rows) NextResultSet() error { - if !r.HasNextResultSet() { - return io.EOF - } - - r.currentResultSetIndex++ - r.currentRowIdx = 0 - - errStr := r.result.Error(r.currentResultSetIndex) - if errStr != "" { - return fmt.Errorf("failed to execute statement\n%s", errStr) - } - if !r.result.HasResult(r.currentResultSetIndex) { - return fmt.Errorf("no results for statement") - } - - return nil -} +package shared + +import ( + "database/sql/driver" + "fmt" + "io" +) + +type rowsProvider interface { + SetsCount() int + RowsCount(setIdx int) int + Columns(setIdx int) []string + FieldValue(setIdx, rowIdx int, columnIdx int) driver.Value + Error(setIdx int) string + HasResult(setIdx int) bool +} + +func NewRows(result rowsProvider) driver.Rows { + return &rows{result: result} +} + +type rows struct { + result rowsProvider + currentResultSetIndex int + currentRowIdx int +} + +func (r *rows) Columns() []string { + return r.result.Columns(r.currentResultSetIndex) +} + +func (r *rows) Close() error { + return nil +} + +func (r *rows) Next(dest []driver.Value) error { + if r.currentRowIdx == r.result.RowsCount(r.currentResultSetIndex) { + return io.EOF + } + count := len(r.result.Columns(r.currentResultSetIndex)) + for idx := 0; idx < count; idx++ { + dest[idx] = r.result.FieldValue(r.currentResultSetIndex, r.currentRowIdx, idx) + } + r.currentRowIdx++ + return nil +} + +func (r *rows) HasNextResultSet() bool { + return r.currentResultSetIndex < r.result.SetsCount()-1 +} + +func (r *rows) NextResultSet() error { + if !r.HasNextResultSet() { + return io.EOF + } + + r.currentResultSetIndex++ + r.currentRowIdx = 0 + + errStr := 
r.result.Error(r.currentResultSetIndex) + if errStr != "" { + return fmt.Errorf("failed to execute statement\n%s", errStr) + } + if !r.result.HasResult(r.currentResultSetIndex) { + return fmt.Errorf("no results for statement") + } + + return nil +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/statement.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/statement.go index 9e5974f4e4..9a8125e74e 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/statement.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared/statement.go @@ -1,240 +1,240 @@ -package shared - -import ( - "database/sql/driver" - "encoding/json" - "fmt" - "regexp" - "sort" - - "github.com/antlr/antlr4/runtime/Go/antlr/v4" - "github.com/libsql/sqlite-antlr4-parser/sqliteparser" - "github.com/libsql/sqlite-antlr4-parser/sqliteparserutils" -) - -type ParamsInfo struct { - NamedParameters []string - PositionalParametersCount int -} - -func ParseStatement(sql string) ([]string, []ParamsInfo, error) { - stmts, _ := sqliteparserutils.SplitStatement(sql) - - stmtsParams := make([]ParamsInfo, len(stmts)) - for idx, stmt := range stmts { - nameParams, positionalParamsCount, err := extractParameters(stmt) - if err != nil { - return nil, nil, err - } - stmtsParams[idx] = ParamsInfo{nameParams, positionalParamsCount} - } - return stmts, stmtsParams, nil -} - -func ParseStatementAndArgs(sql string, args []driver.NamedValue) ([]string, []Params, error) { - parameters, err := ConvertArgs(args) - if err != nil { - return nil, nil, err - } - - stmts, _ := sqliteparserutils.SplitStatement(sql) - - stmtsParams := make([]Params, len(stmts)) - totalParametersAlreadyUsed := 0 - for idx, stmt := range stmts { - stmtParams, err := generateStatementParameters(stmt, parameters, totalParametersAlreadyUsed) - if err != nil { - return nil, nil, fmt.Errorf("fail to generate statement parameter. statement: %s. 
error: %v", stmt, err) - } - stmtsParams[idx] = stmtParams - totalParametersAlreadyUsed += stmtParams.Len() - } - return stmts, stmtsParams, nil -} - -type paramsType int - -const ( - namedParameters paramsType = iota - positionalParameters -) - -type Params struct { - positional []any - named map[string]any -} - -func (p *Params) MarshalJSON() ([]byte, error) { - if len(p.named) > 0 { - return json.Marshal(p.named) - } - if len(p.positional) > 0 { - return json.Marshal(p.positional) - } - return json.Marshal(make([]any, 0)) -} - -func (p *Params) Named() map[string]any { - return p.named -} - -func (p *Params) Positional() []any { - return p.positional -} - -func (p *Params) Len() int { - if p.named != nil { - return len(p.named) - } - - return len(p.positional) -} - -func (p *Params) Type() paramsType { - if p.named != nil { - return namedParameters - } - - return positionalParameters -} - -func NewParams(t paramsType) Params { - p := Params{} - switch t { - case namedParameters: - p.named = make(map[string]any) - case positionalParameters: - p.positional = make([]any, 0) - } - - return p -} - -func getParamType(arg *driver.NamedValue) paramsType { - if arg.Name == "" { - return positionalParameters - } - return namedParameters -} - -func ConvertArgs(args []driver.NamedValue) (Params, error) { - if len(args) == 0 { - return NewParams(positionalParameters), nil - } - - var sortedArgs []*driver.NamedValue - for idx := range args { - sortedArgs = append(sortedArgs, &args[idx]) - } - sort.Slice(sortedArgs, func(i, j int) bool { - return sortedArgs[i].Ordinal < sortedArgs[j].Ordinal - }) - - parametersType := getParamType(sortedArgs[0]) - parameters := NewParams(parametersType) - for _, arg := range sortedArgs { - if parametersType != getParamType(arg) { - return Params{}, fmt.Errorf("driver does not accept positional and named parameters at the same time") - } - - switch parametersType { - case positionalParameters: - parameters.positional = append(parameters.positional, arg.Value) - case namedParameters: - parameters.named[arg.Name] = arg.Value - } - } - return parameters, nil -} - -func generateStatementParameters(stmt string, queryParams Params, positionalParametersOffset int) (Params, error) { - nameParams, positionalParamsCount, err := extractParameters(stmt) - if err != nil { - return Params{}, err - } - - stmtParams := NewParams(queryParams.Type()) - - switch queryParams.Type() { - case positionalParameters: - if positionalParametersOffset+positionalParamsCount > len(queryParams.positional) { - return Params{}, fmt.Errorf("missing positional parameters") - } - stmtParams.positional = queryParams.positional[positionalParametersOffset : positionalParametersOffset+positionalParamsCount] - case namedParameters: - stmtParametersNeeded := make(map[string]bool) - for _, stmtParametersName := range nameParams { - stmtParametersNeeded[stmtParametersName] = true - } - for queryParamsName, queryParamsValue := range queryParams.named { - if stmtParametersNeeded[queryParamsName] { - stmtParams.named[queryParamsName] = queryParamsValue - delete(stmtParametersNeeded, queryParamsName) - } - } - } - - return stmtParams, nil -} - -func extractParameters(stmt string) (nameParams []string, positionalParamsCount int, err error) { - statementStream := antlr.NewInputStream(stmt) - sqliteparser.NewSQLiteLexer(statementStream) - lexer := sqliteparser.NewSQLiteLexer(statementStream) - - allTokens := lexer.GetAllTokens() - - nameParamsSet := make(map[string]bool) - - for _, token := range allTokens { - 
tokenType := token.GetTokenType() -		if tokenType == sqliteparser.SQLiteLexerBIND_PARAMETER { -			parameter := token.GetText() - -			isPositionalParameter, err := isPositionalParameter(parameter) -			if err != nil { -				return []string{}, 0, err -			} - -			if isPositionalParameter { -				positionalParamsCount++ -			} else { -				paramWithoutPrefix, err := removeParamPrefix(parameter) -				if err != nil { -					return []string{}, 0, err -				} else { -					nameParamsSet[paramWithoutPrefix] = true -				} -			} -		} -	} - -	nameParams = make([]string, 0, len(nameParamsSet)) -	for k := range nameParamsSet { -		nameParams = append(nameParams, k) -	} - -	return nameParams, positionalParamsCount, nil -} - -func isPositionalParameter(param string) (ok bool, err error) { -	re := regexp.MustCompile(`\?([0-9]*).*`) -	match := re.FindSubmatch([]byte(param)) -	if match == nil { -		return false, nil -	} - -	posS := string(match[1]) -	if posS == "" { -		return true, nil -	} - -	return true, fmt.Errorf("unsuppoted positional parameter. This driver does not accept positional parameters with indexes (like ?)") -} - -func removeParamPrefix(paramName string) (string, error) { -	if paramName[0] == ':' || paramName[0] == '@' || paramName[0] == '$' { -		return paramName[1:], nil -	} -	return "", fmt.Errorf("all named parameters must start with ':', or '@' or '$'") -} +package shared + +import ( +	"database/sql/driver" +	"encoding/json" +	"fmt" +	"regexp" +	"sort" + +	"github.com/antlr/antlr4/runtime/Go/antlr/v4" +	"github.com/libsql/sqlite-antlr4-parser/sqliteparser" +	"github.com/libsql/sqlite-antlr4-parser/sqliteparserutils" +) + +type ParamsInfo struct { +	NamedParameters           []string +	PositionalParametersCount int +} + +func ParseStatement(sql string) ([]string, []ParamsInfo, error) { +	stmts, _ := sqliteparserutils.SplitStatement(sql) + +	stmtsParams := make([]ParamsInfo, len(stmts)) +	for idx, stmt := range stmts { +		nameParams, positionalParamsCount, err := extractParameters(stmt) +		if err != nil { +			return nil, nil, err +		} +		stmtsParams[idx] = ParamsInfo{nameParams, positionalParamsCount} +	} +	return stmts, stmtsParams, nil +} + +func ParseStatementAndArgs(sql string, args []driver.NamedValue) ([]string, []Params, error) { +	parameters, err := ConvertArgs(args) +	if err != nil { +		return nil, nil, err +	} + +	stmts, _ := sqliteparserutils.SplitStatement(sql) + +	stmtsParams := make([]Params, len(stmts)) +	totalParametersAlreadyUsed := 0 +	for idx, stmt := range stmts { +		stmtParams, err := generateStatementParameters(stmt, parameters, totalParametersAlreadyUsed) +		if err != nil { +			return nil, nil, fmt.Errorf("failed to generate statement parameters. statement: %s. 
error: %v", stmt, err) + } + stmtsParams[idx] = stmtParams + totalParametersAlreadyUsed += stmtParams.Len() + } + return stmts, stmtsParams, nil +} + +type paramsType int + +const ( + namedParameters paramsType = iota + positionalParameters +) + +type Params struct { + positional []any + named map[string]any +} + +func (p *Params) MarshalJSON() ([]byte, error) { + if len(p.named) > 0 { + return json.Marshal(p.named) + } + if len(p.positional) > 0 { + return json.Marshal(p.positional) + } + return json.Marshal(make([]any, 0)) +} + +func (p *Params) Named() map[string]any { + return p.named +} + +func (p *Params) Positional() []any { + return p.positional +} + +func (p *Params) Len() int { + if p.named != nil { + return len(p.named) + } + + return len(p.positional) +} + +func (p *Params) Type() paramsType { + if p.named != nil { + return namedParameters + } + + return positionalParameters +} + +func NewParams(t paramsType) Params { + p := Params{} + switch t { + case namedParameters: + p.named = make(map[string]any) + case positionalParameters: + p.positional = make([]any, 0) + } + + return p +} + +func getParamType(arg *driver.NamedValue) paramsType { + if arg.Name == "" { + return positionalParameters + } + return namedParameters +} + +func ConvertArgs(args []driver.NamedValue) (Params, error) { + if len(args) == 0 { + return NewParams(positionalParameters), nil + } + + var sortedArgs []*driver.NamedValue + for idx := range args { + sortedArgs = append(sortedArgs, &args[idx]) + } + sort.Slice(sortedArgs, func(i, j int) bool { + return sortedArgs[i].Ordinal < sortedArgs[j].Ordinal + }) + + parametersType := getParamType(sortedArgs[0]) + parameters := NewParams(parametersType) + for _, arg := range sortedArgs { + if parametersType != getParamType(arg) { + return Params{}, fmt.Errorf("driver does not accept positional and named parameters at the same time") + } + + switch parametersType { + case positionalParameters: + parameters.positional = append(parameters.positional, arg.Value) + case namedParameters: + parameters.named[arg.Name] = arg.Value + } + } + return parameters, nil +} + +func generateStatementParameters(stmt string, queryParams Params, positionalParametersOffset int) (Params, error) { + nameParams, positionalParamsCount, err := extractParameters(stmt) + if err != nil { + return Params{}, err + } + + stmtParams := NewParams(queryParams.Type()) + + switch queryParams.Type() { + case positionalParameters: + if positionalParametersOffset+positionalParamsCount > len(queryParams.positional) { + return Params{}, fmt.Errorf("missing positional parameters") + } + stmtParams.positional = queryParams.positional[positionalParametersOffset : positionalParametersOffset+positionalParamsCount] + case namedParameters: + stmtParametersNeeded := make(map[string]bool) + for _, stmtParametersName := range nameParams { + stmtParametersNeeded[stmtParametersName] = true + } + for queryParamsName, queryParamsValue := range queryParams.named { + if stmtParametersNeeded[queryParamsName] { + stmtParams.named[queryParamsName] = queryParamsValue + delete(stmtParametersNeeded, queryParamsName) + } + } + } + + return stmtParams, nil +} + +func extractParameters(stmt string) (nameParams []string, positionalParamsCount int, err error) { + statementStream := antlr.NewInputStream(stmt) + sqliteparser.NewSQLiteLexer(statementStream) + lexer := sqliteparser.NewSQLiteLexer(statementStream) + + allTokens := lexer.GetAllTokens() + + nameParamsSet := make(map[string]bool) + + for _, token := range allTokens { + 
tokenType := token.GetTokenType() +		if tokenType == sqliteparser.SQLiteLexerBIND_PARAMETER { +			parameter := token.GetText() + +			isPositionalParameter, err := isPositionalParameter(parameter) +			if err != nil { +				return []string{}, 0, err +			} + +			if isPositionalParameter { +				positionalParamsCount++ +			} else { +				paramWithoutPrefix, err := removeParamPrefix(parameter) +				if err != nil { +					return []string{}, 0, err +				} else { +					nameParamsSet[paramWithoutPrefix] = true +				} +			} +		} +	} + +	nameParams = make([]string, 0, len(nameParamsSet)) +	for k := range nameParamsSet { +		nameParams = append(nameParams, k) +	} + +	return nameParams, positionalParamsCount, nil +} + +func isPositionalParameter(param string) (ok bool, err error) { +	re := regexp.MustCompile(`\?([0-9]*).*`) +	match := re.FindSubmatch([]byte(param)) +	if match == nil { +		return false, nil +	} + +	posS := string(match[1]) +	if posS == "" { +		return true, nil +	} + +	return true, fmt.Errorf("unsupported positional parameter. This driver does not accept positional parameters with indexes (like ?1)") +} + +func removeParamPrefix(paramName string) (string, error) { +	if paramName[0] == ':' || paramName[0] == '@' || paramName[0] == '$' { +		return paramName[1:], nil +	} +	return "", fmt.Errorf("all named parameters must start with ':', or '@' or '$'") +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/ws/driver.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/ws/driver.go index a36aee6beb..604e3e12fe 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/ws/driver.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/ws/driver.go @@ -1,194 +1,194 @@ -package ws - -import ( -	"context" -	"database/sql/driver" -	"io" -	"sort" -) - -type result struct { -	id      int64 -	changes int64 -} - -func (r *result) LastInsertId() (int64, error) { -	return r.id, nil -} - -func (r *result) RowsAffected() (int64, error) { -	return r.changes, nil -} - -type rows struct { -	res           *execResponse -	currentRowIdx int -} - -func (r *rows) Columns() []string { -	return r.res.columns() -} - -func (r *rows) Close() error { -	return nil -} - -func (r *rows) Next(dest []driver.Value) error { -	if r.currentRowIdx == r.res.rowsCount() { -		return io.EOF -	} -	count := r.res.rowLen(r.currentRowIdx) -	for idx := 0; idx < count; idx++ { -		v, err := r.res.value(r.currentRowIdx, idx) -		if err != nil { -			return err -		} -		dest[idx] = v -	} -	r.currentRowIdx++ -	return nil -} - -type conn struct { -	ws *websocketConn -} - -func Connect(url string, jwt string) (*conn, error) { -	c, err := connect(url, jwt) -	if err != nil { -		return nil, err -	} -	return &conn{c}, nil -} - -type stmt struct { -	c     *conn -	query string -} - -func (s stmt) Close() error { -	return nil -} - -func (s stmt) NumInput() int { -	return -1 -} - -func convertToNamed(args []driver.Value) []driver.NamedValue { -	if len(args) == 0 { -		return nil -	} -	result := []driver.NamedValue{} -	for idx := range args { -		result = append(result, driver.NamedValue{Ordinal: idx, Value: args[idx]}) -	} -	return result -} - -func (s stmt) Exec(args []driver.Value) (driver.Result, error) { -	return s.ExecContext(context.Background(), convertToNamed(args)) -} - -func (s stmt) Query(args []driver.Value) (driver.Rows, error) { -	return s.QueryContext(context.Background(), convertToNamed(args)) -} - -func (s stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { -	return s.c.ExecContext(ctx, s.query, args) -} - 
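The statement.go code above splits a multi-statement SQL string and enforces two parameter rules: positional and named arguments cannot be mixed in one call, and named placeholders must start with ':', '@' or '$', while indexed placeholders such as ?1 are rejected. Below is a minimal sketch of how those rules surface through database/sql; the DSN, token, and table name are placeholders invented for illustration, not taken from this repo:

```go
package main

import (
	"database/sql"
	"log"

	// Blank import registers the "libsql" driver via init() in sql.go.
	_ "github.com/tursodatabase/libsql-client-go/libsql"
)

func main() {
	// Placeholder DSN; Driver.Open accepts the auth token as a query parameter.
	db, err := sql.Open("libsql", "libsql://example.turso.io?authToken=PLACEHOLDER")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Positional parameters: plain '?' placeholders, bound in order.
	if _, err := db.Exec("INSERT INTO notes (id, body) VALUES (?, ?)", 1, "hello"); err != nil {
		log.Fatal(err)
	}

	// Named parameters: placeholders must start with ':', '@' or '$'.
	if _, err := db.Exec("UPDATE notes SET body = :body WHERE id = :id",
		sql.Named("body", "updated"), sql.Named("id", 1)); err != nil {
		log.Fatal(err)
	}

	// Mixing named and positional arguments in one statement is rejected by
	// ConvertArgs, and indexed placeholders such as '?1' return an error.
}
```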
-func (s stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { - return s.c.QueryContext(ctx, s.query, args) -} - -func (c *conn) Ping() error { - return c.PingContext(context.Background()) -} - -func (c *conn) PingContext(ctx context.Context) error { - _, err := c.ws.exec(ctx, "SELECT 1", params{}, false) - return err -} - -func (c *conn) Prepare(query string) (driver.Stmt, error) { - return c.PrepareContext(context.Background(), query) -} - -func (c *conn) PrepareContext(_ context.Context, query string) (driver.Stmt, error) { - return stmt{c, query}, nil -} - -func (c *conn) Close() error { - return c.ws.Close() -} - -type tx struct { - c *conn -} - -func (t tx) Commit() error { - _, err := t.c.ExecContext(context.Background(), "COMMIT", nil) - if err != nil { - return err - } - return nil -} - -func (t tx) Rollback() error { - _, err := t.c.ExecContext(context.Background(), "ROLLBACK", nil) - if err != nil { - return err - } - return nil -} - -func (c *conn) Begin() (driver.Tx, error) { - return c.BeginTx(context.Background(), driver.TxOptions{}) -} - -func (c *conn) BeginTx(ctx context.Context, _ driver.TxOptions) (driver.Tx, error) { - _, err := c.ExecContext(ctx, "BEGIN", nil) - if err != nil { - return tx{nil}, err - } - return tx{c}, nil -} - -func convertArgs(args []driver.NamedValue) params { - if len(args) == 0 { - return params{} - } - positionalArgs := [](*driver.NamedValue){} - namedArgs := []namedParam{} - for idx := range args { - if len(args[idx].Name) > 0 { - namedArgs = append(namedArgs, namedParam{args[idx].Name, args[idx].Value}) - } else { - positionalArgs = append(positionalArgs, &args[idx]) - } - } - sort.Slice(positionalArgs, func(i, j int) bool { - return positionalArgs[i].Ordinal < positionalArgs[j].Ordinal - }) - posArgs := [](any){} - for idx := range positionalArgs { - posArgs = append(posArgs, positionalArgs[idx].Value) - } - return params{PositinalArgs: posArgs, NamedArgs: namedArgs} -} - -func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { - res, err := c.ws.exec(ctx, query, convertArgs(args), false) - if err != nil { - return nil, err - } - return &result{res.lastInsertId(), res.affectedRowCount()}, nil -} - -func (c *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { - res, err := c.ws.exec(ctx, query, convertArgs(args), true) - if err != nil { - return nil, err - } - return &rows{res, 0}, nil -} +package ws + +import ( + "context" + "database/sql/driver" + "io" + "sort" +) + +type result struct { + id int64 + changes int64 +} + +func (r *result) LastInsertId() (int64, error) { + return r.id, nil +} + +func (r *result) RowsAffected() (int64, error) { + return r.changes, nil +} + +type rows struct { + res *execResponse + currentRowIdx int +} + +func (r *rows) Columns() []string { + return r.res.columns() +} + +func (r *rows) Close() error { + return nil +} + +func (r *rows) Next(dest []driver.Value) error { + if r.currentRowIdx == r.res.rowsCount() { + return io.EOF + } + count := r.res.rowLen(r.currentRowIdx) + for idx := 0; idx < count; idx++ { + v, err := r.res.value(r.currentRowIdx, idx) + if err != nil { + return err + } + dest[idx] = v + } + r.currentRowIdx++ + return nil +} + +type conn struct { + ws *websocketConn +} + +func Connect(url string, jwt string) (*conn, error) { + c, err := connect(url, jwt) + if err != nil { + return nil, err + } + return &conn{c}, nil +} + +type stmt struct { + c *conn + 
query string +} + +func (s stmt) Close() error { + return nil +} + +func (s stmt) NumInput() int { + return -1 +} + +func convertToNamed(args []driver.Value) []driver.NamedValue { + if len(args) == 0 { + return nil + } + result := []driver.NamedValue{} + for idx := range args { + result = append(result, driver.NamedValue{Ordinal: idx, Value: args[idx]}) + } + return result +} + +func (s stmt) Exec(args []driver.Value) (driver.Result, error) { + return s.ExecContext(context.Background(), convertToNamed(args)) +} + +func (s stmt) Query(args []driver.Value) (driver.Rows, error) { + return s.QueryContext(context.Background(), convertToNamed(args)) +} + +func (s stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + return s.c.ExecContext(ctx, s.query, args) +} + +func (s stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + return s.c.QueryContext(ctx, s.query, args) +} + +func (c *conn) Ping() error { + return c.PingContext(context.Background()) +} + +func (c *conn) PingContext(ctx context.Context) error { + _, err := c.ws.exec(ctx, "SELECT 1", params{}, false) + return err +} + +func (c *conn) Prepare(query string) (driver.Stmt, error) { + return c.PrepareContext(context.Background(), query) +} + +func (c *conn) PrepareContext(_ context.Context, query string) (driver.Stmt, error) { + return stmt{c, query}, nil +} + +func (c *conn) Close() error { + return c.ws.Close() +} + +type tx struct { + c *conn +} + +func (t tx) Commit() error { + _, err := t.c.ExecContext(context.Background(), "COMMIT", nil) + if err != nil { + return err + } + return nil +} + +func (t tx) Rollback() error { + _, err := t.c.ExecContext(context.Background(), "ROLLBACK", nil) + if err != nil { + return err + } + return nil +} + +func (c *conn) Begin() (driver.Tx, error) { + return c.BeginTx(context.Background(), driver.TxOptions{}) +} + +func (c *conn) BeginTx(ctx context.Context, _ driver.TxOptions) (driver.Tx, error) { + _, err := c.ExecContext(ctx, "BEGIN", nil) + if err != nil { + return tx{nil}, err + } + return tx{c}, nil +} + +func convertArgs(args []driver.NamedValue) params { + if len(args) == 0 { + return params{} + } + positionalArgs := [](*driver.NamedValue){} + namedArgs := []namedParam{} + for idx := range args { + if len(args[idx].Name) > 0 { + namedArgs = append(namedArgs, namedParam{args[idx].Name, args[idx].Value}) + } else { + positionalArgs = append(positionalArgs, &args[idx]) + } + } + sort.Slice(positionalArgs, func(i, j int) bool { + return positionalArgs[i].Ordinal < positionalArgs[j].Ordinal + }) + posArgs := [](any){} + for idx := range positionalArgs { + posArgs = append(posArgs, positionalArgs[idx].Value) + } + return params{PositinalArgs: posArgs, NamedArgs: namedArgs} +} + +func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + res, err := c.ws.exec(ctx, query, convertArgs(args), false) + if err != nil { + return nil, err + } + return &result{res.lastInsertId(), res.affectedRowCount()}, nil +} + +func (c *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + res, err := c.ws.exec(ctx, query, convertArgs(args), true) + if err != nil { + return nil, err + } + return &rows{res, 0}, nil +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/ws/websockets.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/ws/websockets.go index b43ff4f64f..3f38e40594 100644 --- 
a/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/ws/websockets.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/internal/ws/websockets.go @@ -1,313 +1,313 @@ -package ws - -import ( - "context" - "database/sql/driver" - "encoding/base64" - "fmt" - "strconv" - "sync" - "time" - - "nhooyr.io/websocket" - "nhooyr.io/websocket/wsjson" -) - -// defaultWSTimeout specifies the timeout used for initial http connection -var defaultWSTimeout = 120 * time.Second - -func errorMsg(errorResp interface{}) string { - return errorResp.(map[string]interface{})["error"].(map[string]interface{})["message"].(string) -} - -func isErrorResp(resp interface{}) bool { - return resp.(map[string]interface{})["type"] == "response_error" -} - -type websocketConn struct { - conn *websocket.Conn - idPool *idPool -} - -type namedParam struct { - Name string - Value any -} - -type params struct { - PositinalArgs []any - NamedArgs []namedParam -} - -func convertValue(v any) (map[string]interface{}, error) { - res := map[string]interface{}{} - if v == nil { - res["type"] = "null" - } else if integer, ok := v.(int64); ok { - res["type"] = "integer" - res["value"] = strconv.FormatInt(integer, 10) - } else if text, ok := v.(string); ok { - res["type"] = "text" - res["value"] = text - } else if blob, ok := v.([]byte); ok { - res["type"] = "blob" - res["base64"] = base64.StdEncoding.WithPadding(base64.NoPadding).EncodeToString(blob) - } else if float, ok := v.(float64); ok { - res["type"] = "float" - res["value"] = float - } else { - return nil, fmt.Errorf("unsupported value type: %s", v) - } - return res, nil -} - -type execResponse struct { - resp map[string]interface{} -} - -func (r *execResponse) affectedRowCount() int64 { - return int64(r.resp["affected_row_count"].(float64)) -} - -func (r *execResponse) lastInsertId() int64 { - id, ok := r.resp["last_insert_rowid"].(string) - if !ok { - return 0 - } - value, _ := strconv.ParseInt(id, 10, 64) - return value -} - -func (r *execResponse) columns() []string { - res := []string{} - cols := r.resp["cols"].([]interface{}) - for idx := range cols { - var v string = "" - if cols[idx].(map[string]interface{})["name"] != nil { - v = cols[idx].(map[string]interface{})["name"].(string) - } - res = append(res, v) - } - return res -} - -func (r *execResponse) rowsCount() int { - return len(r.resp["rows"].([]interface{})) -} - -func (r *execResponse) rowLen(rowIdx int) int { - return len(r.resp["rows"].([]interface{})[rowIdx].([]interface{})) -} - -func (r *execResponse) value(rowIdx int, colIdx int) (any, error) { - val := r.resp["rows"].([]interface{})[rowIdx].([]interface{})[colIdx].(map[string]interface{}) - switch val["type"] { - case "null": - return nil, nil - case "integer": - v, err := strconv.ParseInt(val["value"].(string), 10, 64) - if err != nil { - return nil, err - } - return v, nil - case "text": - return val["value"].(string), nil - case "blob": - base64Encoded := val["base64"].(string) - v, err := base64.StdEncoding.WithPadding(base64.NoPadding).DecodeString(base64Encoded) - if err != nil { - return nil, err - } - return v, nil - case "float": - return val["value"].(float64), nil - } - return nil, fmt.Errorf("unrecognized value type: %s", val["type"]) -} - -func (ws *websocketConn) exec(ctx context.Context, sql string, sqlParams params, wantRows bool) (*execResponse, error) { - requestId := ws.idPool.Get() - defer ws.idPool.Put(requestId) - stmt := map[string]interface{}{ - "sql": sql, - "want_rows": wantRows, - } - if 
len(sqlParams.PositinalArgs) > 0 { - args := []map[string]interface{}{} - for idx := range sqlParams.PositinalArgs { - v, err := convertValue(sqlParams.PositinalArgs[idx]) - if err != nil { - return nil, err - } - args = append(args, v) - } - stmt["args"] = args - } - if len(sqlParams.NamedArgs) > 0 { - args := []map[string]interface{}{} - for idx := range sqlParams.NamedArgs { - v, err := convertValue(sqlParams.NamedArgs[idx].Value) - if err != nil { - return nil, err - } - arg := map[string]interface{}{ - "name": sqlParams.NamedArgs[idx].Name, - "value": v, - } - args = append(args, arg) - } - stmt["named_args"] = args - } - err := wsjson.Write(ctx, ws.conn, map[string]interface{}{ - "type": "request", - "request_id": requestId, - "request": map[string]interface{}{ - "type": "execute", - "stream_id": 0, - "stmt": stmt, - }, - }) - if err != nil { - return nil, fmt.Errorf("%w: %s", driver.ErrBadConn, err.Error()) - } - - var resp interface{} - if err = wsjson.Read(ctx, ws.conn, &resp); err != nil { - return nil, fmt.Errorf("%w: %s", driver.ErrBadConn, err.Error()) - } - - if isErrorResp(resp) { - err = fmt.Errorf("unable to execute %s: %s", sql, errorMsg(resp)) - return nil, err - } - - return &execResponse{resp.(map[string]interface{})["response"].(map[string]interface{})["result"].(map[string]interface{})}, nil -} - -func (ws *websocketConn) Close() error { - return ws.conn.Close(websocket.StatusNormalClosure, "All's good") -} - -func connect(url string, jwt string) (*websocketConn, error) { - ctx, cancel := context.WithTimeout(context.Background(), defaultWSTimeout) - defer cancel() - c, _, err := websocket.Dial(ctx, url, &websocket.DialOptions{ - Subprotocols: []string{"hrana1"}, - }) - if err != nil { - return nil, err - } - - err = wsjson.Write(ctx, c, map[string]interface{}{ - "type": "hello", - "jwt": jwt, - }) - if err != nil { - c.Close(websocket.StatusInternalError, err.Error()) - return nil, err - } - - err = wsjson.Write(ctx, c, map[string]interface{}{ - "type": "request", - "request_id": 0, - "request": map[string]interface{}{ - "type": "open_stream", - "stream_id": 0, - }, - }) - if err != nil { - c.Close(websocket.StatusInternalError, err.Error()) - return nil, err - } - - var helloResp interface{} - err = wsjson.Read(ctx, c, &helloResp) - if err != nil { - c.Close(websocket.StatusInternalError, err.Error()) - return nil, err - } - if helloResp.(map[string]interface{})["type"] == "hello_error" { - err = fmt.Errorf("handshake error: %s", errorMsg(helloResp)) - c.Close(websocket.StatusProtocolError, err.Error()) - return nil, err - } - - var openStreamResp interface{} - err = wsjson.Read(ctx, c, &openStreamResp) - if err != nil { - c.Close(websocket.StatusInternalError, err.Error()) - return nil, err - } - - if isErrorResp(openStreamResp) { - err = fmt.Errorf("unable to open stream: %s", errorMsg(helloResp)) - c.Close(websocket.StatusProtocolError, err.Error()) - return nil, err - } - return &websocketConn{c, newIDPool()}, nil -} - -// Below is modified IDPool from "vitess.io/vitess/go/pools" - -// idPool is used to ensure that the set of IDs in use concurrently never -// contains any duplicates. The IDs start at 1 and increase without bound, but -// will never be larger than the peak number of concurrent uses. -// -// idPool's Get() and Put() methods can be used concurrently. -type idPool struct { - sync.Mutex - - // used holds the set of values that have been returned to us with Put(). - used map[uint32]bool - // maxUsed remembers the largest value we've given out. 
- maxUsed uint32 -} - -// NewIDPool creates and initializes an idPool. -func newIDPool() *idPool { - return &idPool{ - used: make(map[uint32]bool), - maxUsed: 0, - } -} - -// Get returns an ID that is unique among currently active users of this pool. -func (pool *idPool) Get() (id uint32) { - pool.Lock() - defer pool.Unlock() - - // Pick a value that's been returned, if any. - for key := range pool.used { - delete(pool.used, key) - return key - } - - // No recycled IDs are available, so increase the pool size. - pool.maxUsed++ - return pool.maxUsed -} - -// Put recycles an ID back into the pool for others to use. Putting back a value -// or 0, or a value that is not currently "checked out", will result in a panic -// because that should never happen except in the case of a programming error. -func (pool *idPool) Put(id uint32) { - pool.Lock() - defer pool.Unlock() - - if id < 1 || id > pool.maxUsed { - panic(fmt.Errorf("idPool.Put(%v): invalid value, must be in the range [1,%v]", id, pool.maxUsed)) - } - - if pool.used[id] { - panic(fmt.Errorf("idPool.Put(%v): can't put value that was already recycled", id)) - } - - // If we're recycling maxUsed, just shrink the pool. - if id == pool.maxUsed { - pool.maxUsed = id - 1 - return - } - - // Add it to the set of recycled IDs. - pool.used[id] = true -} +package ws + +import ( + "context" + "database/sql/driver" + "encoding/base64" + "fmt" + "strconv" + "sync" + "time" + + "nhooyr.io/websocket" + "nhooyr.io/websocket/wsjson" +) + +// defaultWSTimeout specifies the timeout used for initial http connection +var defaultWSTimeout = 120 * time.Second + +func errorMsg(errorResp interface{}) string { + return errorResp.(map[string]interface{})["error"].(map[string]interface{})["message"].(string) +} + +func isErrorResp(resp interface{}) bool { + return resp.(map[string]interface{})["type"] == "response_error" +} + +type websocketConn struct { + conn *websocket.Conn + idPool *idPool +} + +type namedParam struct { + Name string + Value any +} + +type params struct { + PositinalArgs []any + NamedArgs []namedParam +} + +func convertValue(v any) (map[string]interface{}, error) { + res := map[string]interface{}{} + if v == nil { + res["type"] = "null" + } else if integer, ok := v.(int64); ok { + res["type"] = "integer" + res["value"] = strconv.FormatInt(integer, 10) + } else if text, ok := v.(string); ok { + res["type"] = "text" + res["value"] = text + } else if blob, ok := v.([]byte); ok { + res["type"] = "blob" + res["base64"] = base64.StdEncoding.WithPadding(base64.NoPadding).EncodeToString(blob) + } else if float, ok := v.(float64); ok { + res["type"] = "float" + res["value"] = float + } else { + return nil, fmt.Errorf("unsupported value type: %s", v) + } + return res, nil +} + +type execResponse struct { + resp map[string]interface{} +} + +func (r *execResponse) affectedRowCount() int64 { + return int64(r.resp["affected_row_count"].(float64)) +} + +func (r *execResponse) lastInsertId() int64 { + id, ok := r.resp["last_insert_rowid"].(string) + if !ok { + return 0 + } + value, _ := strconv.ParseInt(id, 10, 64) + return value +} + +func (r *execResponse) columns() []string { + res := []string{} + cols := r.resp["cols"].([]interface{}) + for idx := range cols { + var v string = "" + if cols[idx].(map[string]interface{})["name"] != nil { + v = cols[idx].(map[string]interface{})["name"].(string) + } + res = append(res, v) + } + return res +} + +func (r *execResponse) rowsCount() int { + return len(r.resp["rows"].([]interface{})) +} + +func (r *execResponse) 
rowLen(rowIdx int) int { + return len(r.resp["rows"].([]interface{})[rowIdx].([]interface{})) +} + +func (r *execResponse) value(rowIdx int, colIdx int) (any, error) { + val := r.resp["rows"].([]interface{})[rowIdx].([]interface{})[colIdx].(map[string]interface{}) + switch val["type"] { + case "null": + return nil, nil + case "integer": + v, err := strconv.ParseInt(val["value"].(string), 10, 64) + if err != nil { + return nil, err + } + return v, nil + case "text": + return val["value"].(string), nil + case "blob": + base64Encoded := val["base64"].(string) + v, err := base64.StdEncoding.WithPadding(base64.NoPadding).DecodeString(base64Encoded) + if err != nil { + return nil, err + } + return v, nil + case "float": + return val["value"].(float64), nil + } + return nil, fmt.Errorf("unrecognized value type: %s", val["type"]) +} + +func (ws *websocketConn) exec(ctx context.Context, sql string, sqlParams params, wantRows bool) (*execResponse, error) { + requestId := ws.idPool.Get() + defer ws.idPool.Put(requestId) + stmt := map[string]interface{}{ + "sql": sql, + "want_rows": wantRows, + } + if len(sqlParams.PositinalArgs) > 0 { + args := []map[string]interface{}{} + for idx := range sqlParams.PositinalArgs { + v, err := convertValue(sqlParams.PositinalArgs[idx]) + if err != nil { + return nil, err + } + args = append(args, v) + } + stmt["args"] = args + } + if len(sqlParams.NamedArgs) > 0 { + args := []map[string]interface{}{} + for idx := range sqlParams.NamedArgs { + v, err := convertValue(sqlParams.NamedArgs[idx].Value) + if err != nil { + return nil, err + } + arg := map[string]interface{}{ + "name": sqlParams.NamedArgs[idx].Name, + "value": v, + } + args = append(args, arg) + } + stmt["named_args"] = args + } + err := wsjson.Write(ctx, ws.conn, map[string]interface{}{ + "type": "request", + "request_id": requestId, + "request": map[string]interface{}{ + "type": "execute", + "stream_id": 0, + "stmt": stmt, + }, + }) + if err != nil { + return nil, fmt.Errorf("%w: %s", driver.ErrBadConn, err.Error()) + } + + var resp interface{} + if err = wsjson.Read(ctx, ws.conn, &resp); err != nil { + return nil, fmt.Errorf("%w: %s", driver.ErrBadConn, err.Error()) + } + + if isErrorResp(resp) { + err = fmt.Errorf("unable to execute %s: %s", sql, errorMsg(resp)) + return nil, err + } + + return &execResponse{resp.(map[string]interface{})["response"].(map[string]interface{})["result"].(map[string]interface{})}, nil +} + +func (ws *websocketConn) Close() error { + return ws.conn.Close(websocket.StatusNormalClosure, "All's good") +} + +func connect(url string, jwt string) (*websocketConn, error) { + ctx, cancel := context.WithTimeout(context.Background(), defaultWSTimeout) + defer cancel() + c, _, err := websocket.Dial(ctx, url, &websocket.DialOptions{ + Subprotocols: []string{"hrana1"}, + }) + if err != nil { + return nil, err + } + + err = wsjson.Write(ctx, c, map[string]interface{}{ + "type": "hello", + "jwt": jwt, + }) + if err != nil { + c.Close(websocket.StatusInternalError, err.Error()) + return nil, err + } + + err = wsjson.Write(ctx, c, map[string]interface{}{ + "type": "request", + "request_id": 0, + "request": map[string]interface{}{ + "type": "open_stream", + "stream_id": 0, + }, + }) + if err != nil { + c.Close(websocket.StatusInternalError, err.Error()) + return nil, err + } + + var helloResp interface{} + err = wsjson.Read(ctx, c, &helloResp) + if err != nil { + c.Close(websocket.StatusInternalError, err.Error()) + return nil, err + } + if helloResp.(map[string]interface{})["type"] == 
"hello_error" { + err = fmt.Errorf("handshake error: %s", errorMsg(helloResp)) + c.Close(websocket.StatusProtocolError, err.Error()) + return nil, err + } + + var openStreamResp interface{} + err = wsjson.Read(ctx, c, &openStreamResp) + if err != nil { + c.Close(websocket.StatusInternalError, err.Error()) + return nil, err + } + + if isErrorResp(openStreamResp) { + err = fmt.Errorf("unable to open stream: %s", errorMsg(helloResp)) + c.Close(websocket.StatusProtocolError, err.Error()) + return nil, err + } + return &websocketConn{c, newIDPool()}, nil +} + +// Below is modified IDPool from "vitess.io/vitess/go/pools" + +// idPool is used to ensure that the set of IDs in use concurrently never +// contains any duplicates. The IDs start at 1 and increase without bound, but +// will never be larger than the peak number of concurrent uses. +// +// idPool's Get() and Put() methods can be used concurrently. +type idPool struct { + sync.Mutex + + // used holds the set of values that have been returned to us with Put(). + used map[uint32]bool + // maxUsed remembers the largest value we've given out. + maxUsed uint32 +} + +// NewIDPool creates and initializes an idPool. +func newIDPool() *idPool { + return &idPool{ + used: make(map[uint32]bool), + maxUsed: 0, + } +} + +// Get returns an ID that is unique among currently active users of this pool. +func (pool *idPool) Get() (id uint32) { + pool.Lock() + defer pool.Unlock() + + // Pick a value that's been returned, if any. + for key := range pool.used { + delete(pool.used, key) + return key + } + + // No recycled IDs are available, so increase the pool size. + pool.maxUsed++ + return pool.maxUsed +} + +// Put recycles an ID back into the pool for others to use. Putting back a value +// or 0, or a value that is not currently "checked out", will result in a panic +// because that should never happen except in the case of a programming error. +func (pool *idPool) Put(id uint32) { + pool.Lock() + defer pool.Unlock() + + if id < 1 || id > pool.maxUsed { + panic(fmt.Errorf("idPool.Put(%v): invalid value, must be in the range [1,%v]", id, pool.maxUsed)) + } + + if pool.used[id] { + panic(fmt.Errorf("idPool.Put(%v): can't put value that was already recycled", id)) + } + + // If we're recycling maxUsed, just shrink the pool. + if id == pool.maxUsed { + pool.maxUsed = id - 1 + return + } + + // Add it to the set of recycled IDs. 
+ pool.used[id] = true +} diff --git a/vendor/github.com/tursodatabase/libsql-client-go/libsql/sql.go b/vendor/github.com/tursodatabase/libsql-client-go/libsql/sql.go index 52ee21d4f9..c05f10d5fe 100644 --- a/vendor/github.com/tursodatabase/libsql-client-go/libsql/sql.go +++ b/vendor/github.com/tursodatabase/libsql-client-go/libsql/sql.go @@ -1,348 +1,348 @@ -package libsql - -import ( - "context" - "database/sql" - "database/sql/driver" - "errors" - "fmt" - "net/url" - "strings" - - "github.com/tursodatabase/libsql-client-go/libsql/internal/http" - "github.com/tursodatabase/libsql-client-go/libsql/internal/ws" -) - -type config struct { - authToken *string - tls *bool - proxy *string -} - -type Option interface { - apply(*config) error -} - -type option func(*config) error - -func (o option) apply(c *config) error { - return o(c) -} - -func WithAuthToken(authToken string) Option { - return option(func(o *config) error { - if o.authToken != nil { - return fmt.Errorf("authToken already set") - } - if authToken == "" { - return fmt.Errorf("authToken must not be empty") - } - o.authToken = &authToken - return nil - }) -} - -func WithTls(tls bool) Option { - return option(func(o *config) error { - if o.tls != nil { - return fmt.Errorf("tls already set") - } - o.tls = &tls - return nil - }) -} - -func WithProxy(proxy string) Option { - return option(func(o *config) error { - if o.proxy != nil { - return fmt.Errorf("proxy already set") - } - if proxy == "" { - return fmt.Errorf("proxy must not be empty") - } - o.proxy = &proxy - return nil - }) -} - -func (c config) connector(dbPath string) (driver.Connector, error) { - u, err := url.Parse(dbPath) - if err != nil { - return nil, err - } - if u.Scheme == "file" { - if strings.HasPrefix(dbPath, "file://") && !strings.HasPrefix(dbPath, "file:///") { - return nil, fmt.Errorf("invalid database URL: %s. File URLs should not have double leading slashes. ", dbPath) - } - expectedDrivers := []string{"sqlite", "sqlite3"} - presentDrivers := sql.Drivers() - for _, expectedDriver := range expectedDrivers { - if Contains(presentDrivers, expectedDriver) { - db, err := sql.Open(expectedDriver, dbPath) - if err != nil { - return nil, err - } - return &fileConnector{url: dbPath, driver: db.Driver()}, nil - } - } - return nil, fmt.Errorf("no sqlite driver present. Please import sqlite or sqlite3 driver") - } - - query := u.Query() - if query.Has("auth_token") { - return nil, fmt.Errorf("'auth_token' usage forbidden. Please use 'WithAuthToken' option instead") - } - if query.Has("authToken") { - return nil, fmt.Errorf("'authToken' usage forbidden. Please use 'WithAuthToken' option instead") - } - if query.Has("jwt") { - return nil, fmt.Errorf("'jwt' usage forbidden. Please use 'WithAuthToken' option instead") - } - if query.Has("tls") { - return nil, fmt.Errorf("'tls' usage forbidden. Please use 'WithTls' option instead") - } - - for name := range query { - return nil, fmt.Errorf("unknown query parameter %#v", name) - } - - if u.Scheme == "libsql" { - if c.tls == nil || *c.tls { - u.Scheme = "https" - } else { - if c.tls != nil && u.Port() == "" { - return nil, fmt.Errorf("libsql:// URL without tls must specify an explicit port") - } - u.Scheme = "http" - } - } - - if (u.Scheme == "wss" || u.Scheme == "https") && c.tls != nil && !*c.tls { - return nil, fmt.Errorf("%s:// URL cannot opt out of TLS. 
Only libsql:// can opt in/out of TLS", u.Scheme) - } - if (u.Scheme == "ws" || u.Scheme == "http") && c.tls != nil && *c.tls { - return nil, fmt.Errorf("%s:// URL cannot opt in to TLS. Only libsql:// can opt in/out of TLS", u.Scheme) - } - - authToken := "" - if c.authToken != nil { - authToken = *c.authToken - } - - host := u.Host - if c.proxy != nil { - if u.Scheme == "ws" || u.Scheme == "wss" { - return nil, fmt.Errorf("proxying of ws:// and wss:// URLs is not supported") - } - proxy, err := url.Parse(*c.proxy) - if err != nil { - return nil, err - } - u.Host = proxy.Host - if proxy.Scheme != "" { - u.Scheme = proxy.Scheme - } - } - - if u.Scheme == "wss" || u.Scheme == "ws" { - return wsConnector{url: u.String(), authToken: authToken}, nil - } - if u.Scheme == "https" || u.Scheme == "http" { - return httpConnector{url: u.String(), authToken: authToken, host: host}, nil - } - - return nil, fmt.Errorf("unsupported URL scheme: %s\nThis driver supports only URLs that start with libsql://, file://, https://, http://, wss:// and ws://", u.Scheme) -} - -func NewConnector(dbPath string, opts ...Option) (driver.Connector, error) { - var config config - errs := make([]error, 0, len(opts)) - for _, opt := range opts { - if err := opt.apply(&config); err != nil { - errs = append(errs, err) - } - } - if len(errs) > 0 { - return nil, errors.Join(errs...) - } - return config.connector(dbPath) -} - -type httpConnector struct { - url string - authToken string - host string -} - -func (c httpConnector) Connect(_ctx context.Context) (driver.Conn, error) { - return http.Connect(c.url, c.authToken, c.host), nil -} - -func (c httpConnector) Driver() driver.Driver { - return Driver{} -} - -type wsConnector struct { - url string - authToken string -} - -func (c wsConnector) Connect(_ctx context.Context) (driver.Conn, error) { - return ws.Connect(c.url, c.authToken) -} - -func (c wsConnector) Driver() driver.Driver { - return Driver{} -} - -type fileConnector struct { - url string - driver driver.Driver -} - -func (c fileConnector) Connect(_ctx context.Context) (driver.Conn, error) { - return c.driver.Open(c.url) -} - -func (c fileConnector) Driver() driver.Driver { - return Driver{} -} - -type Driver struct { -} - -// ExtractJwt extracts the JWT from the URL and removes it from the url. -func extractJwt(query *url.Values) (string, error) { - authTokenSnake := query.Get("auth_token") - authTokenCamel := query.Get("authToken") - jwt := query.Get("jwt") - query.Del("auth_token") - query.Del("authToken") - query.Del("jwt") - - countNonEmpty := func(slice ...string) int { - count := 0 - for _, s := range slice { - if s != "" { - count++ - } - } - return count - } - - if countNonEmpty(authTokenSnake, authTokenCamel, jwt) > 1 { - return "", fmt.Errorf("please use at most one of the following query parameters: 'auth_token', 'authToken', 'jwt'") - } - - if authTokenSnake != "" { - return authTokenSnake, nil - } else if authTokenCamel != "" { - return authTokenCamel, nil - } else { - return jwt, nil - } -} - -func extractTls(query *url.Values, scheme string) (bool, error) { - tls := query.Get("tls") - query.Del("tls") - if tls == "" { - if scheme == "http" || scheme == "ws" { - return false, nil - } else { - return true, nil - } - } else if tls == "0" { - return false, nil - } else if tls == "1" { - return true, nil - } else { - return true, fmt.Errorf("unknown value of tls query parameter. 
Valid values are 0 and 1") - } -} - -func (d Driver) Open(dbUrl string) (driver.Conn, error) { - u, err := url.Parse(dbUrl) - if err != nil { - return nil, err - } - if u.Scheme == "file" { - if strings.HasPrefix(dbUrl, "file://") && !strings.HasPrefix(dbUrl, "file:///") { - return nil, fmt.Errorf("invalid database URL: %s. File URLs should not have double leading slashes. ", dbUrl) - } - expectedDrivers := []string{"sqlite", "sqlite3"} - presentDrivers := sql.Drivers() - for _, expectedDriver := range expectedDrivers { - if Contains(presentDrivers, expectedDriver) { - db, err := sql.Open(expectedDriver, dbUrl) - if err != nil { - return nil, err - } - return db.Driver().Open(dbUrl) - } - } - return nil, fmt.Errorf("no sqlite driver present. Please import sqlite or sqlite3 driver") - } - - query := u.Query() - jwt, err := extractJwt(&query) - if err != nil { - return nil, err - } - - tls, err := extractTls(&query, u.Scheme) - if err != nil { - return nil, err - } - - for name := range query { - return nil, fmt.Errorf("unknown query parameter %#v", name) - } - u.RawQuery = "" - - if u.Scheme == "libsql" { - if tls { - u.Scheme = "https" - } else { - if u.Port() == "" { - return nil, fmt.Errorf("libsql:// URL with ?tls=0 must specify an explicit port") - } - u.Scheme = "http" - } - } - - if (u.Scheme == "wss" || u.Scheme == "https") && !tls { - return nil, fmt.Errorf("%s:// URL cannot opt out of TLS using ?tls=0", u.Scheme) - } - if (u.Scheme == "ws" || u.Scheme == "http") && tls { - return nil, fmt.Errorf("%s:// URL cannot opt in to TLS using ?tls=1", u.Scheme) - } - - if u.Scheme == "wss" || u.Scheme == "ws" { - return ws.Connect(u.String(), jwt) - } - if u.Scheme == "https" || u.Scheme == "http" { - return http.Connect(u.String(), jwt, u.Host), nil - } - - return nil, fmt.Errorf("unsupported URL scheme: %s\nThis driver supports only URLs that start with libsql://, file://, https://, http://, wss:// and ws://", u.Scheme) -} - -func init() { - sql.Register("libsql", Driver{}) -} - -// backported from Go 1.21 - -func Contains[S ~[]E, E comparable](s S, v E) bool { - return Index(s, v) >= 0 -} - -func Index[S ~[]E, E comparable](s S, v E) int { - for i := range s { - if v == s[i] { - return i - } - } - return -1 -} +package libsql + +import ( + "context" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "net/url" + "strings" + + "github.com/tursodatabase/libsql-client-go/libsql/internal/http" + "github.com/tursodatabase/libsql-client-go/libsql/internal/ws" +) + +type config struct { + authToken *string + tls *bool + proxy *string +} + +type Option interface { + apply(*config) error +} + +type option func(*config) error + +func (o option) apply(c *config) error { + return o(c) +} + +func WithAuthToken(authToken string) Option { + return option(func(o *config) error { + if o.authToken != nil { + return fmt.Errorf("authToken already set") + } + if authToken == "" { + return fmt.Errorf("authToken must not be empty") + } + o.authToken = &authToken + return nil + }) +} + +func WithTls(tls bool) Option { + return option(func(o *config) error { + if o.tls != nil { + return fmt.Errorf("tls already set") + } + o.tls = &tls + return nil + }) +} + +func WithProxy(proxy string) Option { + return option(func(o *config) error { + if o.proxy != nil { + return fmt.Errorf("proxy already set") + } + if proxy == "" { + return fmt.Errorf("proxy must not be empty") + } + o.proxy = &proxy + return nil + }) +} + +func (c config) connector(dbPath string) (driver.Connector, error) { + u, err := 
url.Parse(dbPath) + if err != nil { + return nil, err + } + if u.Scheme == "file" { + if strings.HasPrefix(dbPath, "file://") && !strings.HasPrefix(dbPath, "file:///") { + return nil, fmt.Errorf("invalid database URL: %s. File URLs should not have double leading slashes. ", dbPath) + } + expectedDrivers := []string{"sqlite", "sqlite3"} + presentDrivers := sql.Drivers() + for _, expectedDriver := range expectedDrivers { + if Contains(presentDrivers, expectedDriver) { + db, err := sql.Open(expectedDriver, dbPath) + if err != nil { + return nil, err + } + return &fileConnector{url: dbPath, driver: db.Driver()}, nil + } + } + return nil, fmt.Errorf("no sqlite driver present. Please import sqlite or sqlite3 driver") + } + + query := u.Query() + if query.Has("auth_token") { + return nil, fmt.Errorf("'auth_token' usage forbidden. Please use 'WithAuthToken' option instead") + } + if query.Has("authToken") { + return nil, fmt.Errorf("'authToken' usage forbidden. Please use 'WithAuthToken' option instead") + } + if query.Has("jwt") { + return nil, fmt.Errorf("'jwt' usage forbidden. Please use 'WithAuthToken' option instead") + } + if query.Has("tls") { + return nil, fmt.Errorf("'tls' usage forbidden. Please use 'WithTls' option instead") + } + + for name := range query { + return nil, fmt.Errorf("unknown query parameter %#v", name) + } + + if u.Scheme == "libsql" { + if c.tls == nil || *c.tls { + u.Scheme = "https" + } else { + if c.tls != nil && u.Port() == "" { + return nil, fmt.Errorf("libsql:// URL without tls must specify an explicit port") + } + u.Scheme = "http" + } + } + + if (u.Scheme == "wss" || u.Scheme == "https") && c.tls != nil && !*c.tls { + return nil, fmt.Errorf("%s:// URL cannot opt out of TLS. Only libsql:// can opt in/out of TLS", u.Scheme) + } + if (u.Scheme == "ws" || u.Scheme == "http") && c.tls != nil && *c.tls { + return nil, fmt.Errorf("%s:// URL cannot opt in to TLS. Only libsql:// can opt in/out of TLS", u.Scheme) + } + + authToken := "" + if c.authToken != nil { + authToken = *c.authToken + } + + host := u.Host + if c.proxy != nil { + if u.Scheme == "ws" || u.Scheme == "wss" { + return nil, fmt.Errorf("proxying of ws:// and wss:// URLs is not supported") + } + proxy, err := url.Parse(*c.proxy) + if err != nil { + return nil, err + } + u.Host = proxy.Host + if proxy.Scheme != "" { + u.Scheme = proxy.Scheme + } + } + + if u.Scheme == "wss" || u.Scheme == "ws" { + return wsConnector{url: u.String(), authToken: authToken}, nil + } + if u.Scheme == "https" || u.Scheme == "http" { + return httpConnector{url: u.String(), authToken: authToken, host: host}, nil + } + + return nil, fmt.Errorf("unsupported URL scheme: %s\nThis driver supports only URLs that start with libsql://, file://, https://, http://, wss:// and ws://", u.Scheme) +} + +func NewConnector(dbPath string, opts ...Option) (driver.Connector, error) { + var config config + errs := make([]error, 0, len(opts)) + for _, opt := range opts { + if err := opt.apply(&config); err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + return nil, errors.Join(errs...) 
+	} +	return config.connector(dbPath) +} + +type httpConnector struct { +	url       string +	authToken string +	host      string +} + +func (c httpConnector) Connect(_ctx context.Context) (driver.Conn, error) { +	return http.Connect(c.url, c.authToken, c.host), nil +} + +func (c httpConnector) Driver() driver.Driver { +	return Driver{} +} + +type wsConnector struct { +	url       string +	authToken string +} + +func (c wsConnector) Connect(_ctx context.Context) (driver.Conn, error) { +	return ws.Connect(c.url, c.authToken) +} + +func (c wsConnector) Driver() driver.Driver { +	return Driver{} +} + +type fileConnector struct { +	url    string +	driver driver.Driver +} + +func (c fileConnector) Connect(_ctx context.Context) (driver.Conn, error) { +	return c.driver.Open(c.url) +} + +func (c fileConnector) Driver() driver.Driver { +	return Driver{} +} + +type Driver struct { +} + +// extractJwt extracts the JWT from the URL and removes it from the URL. +func extractJwt(query *url.Values) (string, error) { +	authTokenSnake := query.Get("auth_token") +	authTokenCamel := query.Get("authToken") +	jwt := query.Get("jwt") +	query.Del("auth_token") +	query.Del("authToken") +	query.Del("jwt") + +	countNonEmpty := func(slice ...string) int { +		count := 0 +		for _, s := range slice { +			if s != "" { +				count++ +			} +		} +		return count +	} + +	if countNonEmpty(authTokenSnake, authTokenCamel, jwt) > 1 { +		return "", fmt.Errorf("please use at most one of the following query parameters: 'auth_token', 'authToken', 'jwt'") +	} + +	if authTokenSnake != "" { +		return authTokenSnake, nil +	} else if authTokenCamel != "" { +		return authTokenCamel, nil +	} else { +		return jwt, nil +	} +} + +func extractTls(query *url.Values, scheme string) (bool, error) { +	tls := query.Get("tls") +	query.Del("tls") +	if tls == "" { +		if scheme == "http" || scheme == "ws" { +			return false, nil +		} else { +			return true, nil +		} +	} else if tls == "0" { +		return false, nil +	} else if tls == "1" { +		return true, nil +	} else { +		return true, fmt.Errorf("unknown value of tls query parameter. Valid values are 0 and 1") +	} +} + +func (d Driver) Open(dbUrl string) (driver.Conn, error) { +	u, err := url.Parse(dbUrl) +	if err != nil { +		return nil, err +	} +	if u.Scheme == "file" { +		if strings.HasPrefix(dbUrl, "file://") && !strings.HasPrefix(dbUrl, "file:///") { +			return nil, fmt.Errorf("invalid database URL: %s. File URLs should not have double leading slashes. ", dbUrl) +		} +		expectedDrivers := []string{"sqlite", "sqlite3"} +		presentDrivers := sql.Drivers() +		for _, expectedDriver := range expectedDrivers { +			if Contains(presentDrivers, expectedDriver) { +				db, err := sql.Open(expectedDriver, dbUrl) +				if err != nil { +					return nil, err +				} +				return db.Driver().Open(dbUrl) +			} +		} +		return nil, fmt.Errorf("no sqlite driver present. 
Please import sqlite or sqlite3 driver") + } + + query := u.Query() + jwt, err := extractJwt(&query) + if err != nil { + return nil, err + } + + tls, err := extractTls(&query, u.Scheme) + if err != nil { + return nil, err + } + + for name := range query { + return nil, fmt.Errorf("unknown query parameter %#v", name) + } + u.RawQuery = "" + + if u.Scheme == "libsql" { + if tls { + u.Scheme = "https" + } else { + if u.Port() == "" { + return nil, fmt.Errorf("libsql:// URL with ?tls=0 must specify an explicit port") + } + u.Scheme = "http" + } + } + + if (u.Scheme == "wss" || u.Scheme == "https") && !tls { + return nil, fmt.Errorf("%s:// URL cannot opt out of TLS using ?tls=0", u.Scheme) + } + if (u.Scheme == "ws" || u.Scheme == "http") && tls { + return nil, fmt.Errorf("%s:// URL cannot opt in to TLS using ?tls=1", u.Scheme) + } + + if u.Scheme == "wss" || u.Scheme == "ws" { + return ws.Connect(u.String(), jwt) + } + if u.Scheme == "https" || u.Scheme == "http" { + return http.Connect(u.String(), jwt, u.Host), nil + } + + return nil, fmt.Errorf("unsupported URL scheme: %s\nThis driver supports only URLs that start with libsql://, file://, https://, http://, wss:// and ws://", u.Scheme) +} + +func init() { + sql.Register("libsql", Driver{}) +} + +// backported from Go 1.21 + +func Contains[S ~[]E, E comparable](s S, v E) bool { + return Index(s, v) >= 0 +} + +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { + return i + } + } + return -1 +} diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE index 6a66aea5ea..d71c2f1e5f 100644 --- a/vendor/golang.org/x/exp/LICENSE +++ b/vendor/golang.org/x/exp/LICENSE @@ -1,27 +1,27 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS index 733099041f..35b0f0dc58 100644 --- a/vendor/golang.org/x/exp/PATENTS +++ b/vendor/golang.org/x/exp/PATENTS @@ -1,22 +1,22 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go index 2c033dff47..6a23534844 100644 --- a/vendor/golang.org/x/exp/constraints/constraints.go +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -1,50 +1,50 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package constraints defines a set of useful constraints to be used -// with type parameters. -package constraints - -// Signed is a constraint that permits any signed integer type. -// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. -type Signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// Unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type Unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -// Integer is a constraint that permits any integer type. -// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. -type Integer interface { - Signed | Unsigned -} - -// Float is a constraint that permits any floating-point type. -// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. -type Float interface { - ~float32 | ~float64 -} - -// Complex is a constraint that permits any complex numeric type. -// If future releases of Go add new predeclared complex numeric types, -// this constraint will be modified to include them. -type Complex interface { - ~complex64 | ~complex128 -} - -// Ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -type Ordered interface { - Integer | Float | ~string -} +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. 
+// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 8a237c5d61..c50c72f346 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -1,218 +1,218 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package slices defines various functions useful with slices of any type. -// Unless otherwise specified, these functions all apply to the elements -// of a slice at index 0 <= i < len(s). -// -// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a -// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), -// or the sorting may fail to sort correctly. A common case is when sorting slices of -// floating-point numbers containing NaN values. -package slices - -import "golang.org/x/exp/constraints" - -// Equal reports whether two slices are equal: the same length and all -// elements equal. If the lengths are different, Equal returns false. -// Otherwise, the elements are compared in increasing index order, and the -// comparison stops at the first unequal pair. -// Floating point NaNs are not considered equal. -func Equal[E comparable](s1, s2 []E) bool { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - if s1[i] != s2[i] { - return false - } - } - return true -} - -// EqualFunc reports whether two slices are equal using a comparison -// function on each pair of elements. If the lengths are different, -// EqualFunc returns false. Otherwise, the elements are compared in -// increasing index order, and the comparison stops at the first index -// for which eq returns false. -func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { - if len(s1) != len(s2) { - return false - } - for i, v1 := range s1 { - v2 := s2[i] - if !eq(v1, v2) { - return false - } - } - return true -} - -// Compare compares the elements of s1 and s2. -// The elements are compared sequentially, starting at index 0, -// until one element is not equal to the other. -// The result of comparing the first non-matching elements is returned. -// If both slices are equal until one of them ends, the shorter slice is -// considered less than the longer one. -// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -// Comparisons involving floating point NaNs are ignored. 
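As a quick illustration of the `Equal`/`Compare` semantics documented above, a minimal sketch against the vendored package:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	a := []int{1, 2, 3}
	b := []int{1, 2, 4}

	fmt.Println(slices.Equal(a, b))       // false: index 2 is the first unequal pair
	fmt.Println(slices.Compare(a, b))     // -1: 3 < 4 at the first mismatch
	fmt.Println(slices.Compare(a, a[:2])) // +1: the shorter slice compares less
	fmt.Println(slices.Contains(b, 4))    // true
}
```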
-func Compare[E constraints.Ordered](s1, s2 []E) int { - s2len := len(s2) - for i, v1 := range s1 { - if i >= s2len { - return +1 - } - v2 := s2[i] - switch { - case v1 < v2: - return -1 - case v1 > v2: - return +1 - } - } - if len(s1) < s2len { - return -1 - } - return 0 -} - -// CompareFunc is like Compare but uses a comparison function -// on each pair of elements. The elements are compared in increasing -// index order, and the comparisons stop after the first time cmp -// returns non-zero. -// The result is the first non-zero result of cmp; if cmp always -// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), -// and +1 if len(s1) > len(s2). -func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { - s2len := len(s2) - for i, v1 := range s1 { - if i >= s2len { - return +1 - } - v2 := s2[i] - if c := cmp(v1, v2); c != 0 { - return c - } - } - if len(s1) < s2len { - return -1 - } - return 0 -} - -// Index returns the index of the first occurrence of v in s, -// or -1 if not present. -func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { - return i - } - } - return -1 -} - -// IndexFunc returns the first index i satisfying f(s[i]), -// or -1 if none do. -func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { - return i - } - } - return -1 -} - -// Contains reports whether v is present in s. -func Contains[E comparable](s []E, v E) bool { - return Index(s, v) >= 0 -} - -// Insert inserts the values v... into s at index i, -// returning the modified slice. -// In the returned slice r, r[i] == v[0]. -// Insert panics if i is out of range. -// This function is O(len(s) + len(v)). -func Insert[S ~[]E, E any](s S, i int, v ...E) S { - tot := len(s) + len(v) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[i:]) - copy(s2[i:], v) - return s2 - } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[i:]) - return s2 -} - -// Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete modifies the contents of the slice s; it does not create a new slice. -// Delete is O(len(s)-(j-i)), so if many items must be deleted, it is better to -// make a single call deleting them all together than to delete one at a time. -func Delete[S ~[]E, E any](s S, i, j int) S { - return append(s[:i], s[j:]...) -} - -// Clone returns a copy of the slice. -// The elements are copied using assignment, so this is a shallow clone. -func Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) -} - -// Compact replaces consecutive runs of equal elements with a single copy. -// This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s; it does not create a new slice. -func Compact[S ~[]E, E comparable](s S) S { - if len(s) == 0 { - return s - } - i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v - i++ - last = v - } - } - return s[:i] -} - -// CompactFunc is like Compact but uses a comparison function. -func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) == 0 { - return s - } - i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v - i++ - last = v - } - } - return s[:i] -} - -// Grow increases the slice's capacity, if necessary, to guarantee space for -// another n elements. 
After Grow(n), at least n elements can be appended -// to the slice without another allocation. Grow may modify elements of the -// slice between the length and the capacity. If n is negative or too large to -// allocate the memory, Grow panics. -func Grow[S ~[]E, E any](s S, n int) S { - return append(s, make(S, n)...)[:len(s)] -} - -// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. -func Clip[S ~[]E, E any](s S) S { - return s[:len(s):len(s)] -} +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +// Unless otherwise specified, these functions all apply to the elements +// of a slice at index 0 <= i < len(s). +// +// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a +// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), +// or the sorting may fail to sort correctly. A common case is when sorting slices of +// floating-point numbers containing NaN values. +package slices + +import "golang.org/x/exp/constraints" + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[E comparable](s1, s2 []E) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using a comparison +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2. +// The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// Comparisons involving floating point NaNs are ignored. +func Compare[E constraints.Ordered](s1, s2 []E) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// CompareFunc is like Compare but uses a comparison function +// on each pair of elements. The elements are compared in increasing +// index order, and the comparisons stop after the first time cmp +// returns non-zero. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). 
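A short sketch of the `*Func` variants with a custom comparator; the case-insensitive comparison here is purely for illustration:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []string{"Go", "RUST"}
	ys := []string{"go", "rust"}

	// EqualFunc: case-insensitive element-wise equality.
	fmt.Println(slices.EqualFunc(xs, ys, strings.EqualFold)) // true

	// CompareFunc stops at the first non-zero comparison.
	c := slices.CompareFunc(xs, ys, func(a, b string) int {
		return strings.Compare(strings.ToLower(a), strings.ToLower(b))
	})
	fmt.Println(c) // 0
}
```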
+func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[E comparable](s []E, v E) int { + for i, vs := range s { + if v == vs { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[E any](s []E, f func(E) bool) int { + for i, v := range s { + if f(v) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[E comparable](s []E, v E) bool { + return Index(s, v) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// In the returned slice r, r[i] == v[0]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). +func Insert[S ~[]E, E any](s S, i int, v ...E) S { + tot := len(s) + len(v) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[i:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[i:]) + return s2 +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete modifies the contents of the slice s; it does not create a new slice. +// Delete is O(len(s)-(j-i)), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +func Delete[S ~[]E, E any](s S, i, j int) S { + return append(s[:i], s[j:]...) +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s; it does not create a new slice. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) == 0 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if v != last { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// CompactFunc is like Compact but uses a comparison function. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) == 0 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if !eq(v, last) { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. Grow may modify elements of the +// slice between the length and the capacity. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + return append(s, make(S, n)...)[:len(s)] +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. 
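The mutation helpers compose as follows; a minimal sketch showing `Compact`, `Insert`, `Delete`, `Grow`, and `Clip` on one slice:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 1, 2, 2, 3}

	s = slices.Compact(s)      // collapse runs of equal elements: [1 2 3]
	s = slices.Insert(s, 1, 9) // [1 9 2 3]
	s = slices.Delete(s, 1, 2) // drop s[1:2]: [1 2 3]

	s = slices.Grow(s, 10) // room for 10 more appends without reallocating
	fmt.Println(len(s), cap(s) >= len(s)+10) // 3 true

	s = slices.Clip(s) // drop the spare capacity again
	fmt.Println(s, len(s) == cap(s)) // [1 2 3] true
}
```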
+func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index c22e74bd10..3fa10d45bd 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -1,127 +1,127 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import ( - "math/bits" - - "golang.org/x/exp/constraints" -) - -// Sort sorts a slice of any ordered type in ascending order. -// Sort may fail to sort correctly when sorting slices of floating-point -// numbers containing Not-a-number (NaN) values. -// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) -// instead if the input may contain NaNs. -func Sort[E constraints.Ordered](x []E) { - n := len(x) - pdqsortOrdered(x, 0, n, bits.Len(uint(n))) -} - -// SortFunc sorts the slice x in ascending order as determined by the less function. -// This sort is not guaranteed to be stable. -// -// SortFunc requires that less is a strict weak ordering. -// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. -func SortFunc[E any](x []E, less func(a, b E) bool) { - n := len(x) - pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) -} - -// SortStable sorts the slice x while keeping the original order of equal -// elements, using less to compare elements. -func SortStableFunc[E any](x []E, less func(a, b E) bool) { - stableLessFunc(x, len(x), less) -} - -// IsSorted reports whether x is sorted in ascending order. -func IsSorted[E constraints.Ordered](x []E) bool { - for i := len(x) - 1; i > 0; i-- { - if x[i] < x[i-1] { - return false - } - } - return true -} - -// IsSortedFunc reports whether x is sorted in ascending order, with less as the -// comparison function. -func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { - for i := len(x) - 1; i > 0; i-- { - if less(x[i], x[i-1]) { - return false - } - } - return true -} - -// BinarySearch searches for target in a sorted slice and returns the position -// where target is found, or the position where target would appear in the -// sort order; it also returns a bool saying whether the target is really found -// in the slice. The slice must be sorted in increasing order. -func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { - // search returns the leftmost position where f returns true, or len(x) if f - // returns false for all x. This is the insertion position for target in x, - // and could point to an element that's either == target or not. - pos := search(len(x), func(i int) bool { return x[i] >= target }) - if pos >= len(x) || x[pos] != target { - return pos, false - } else { - return pos, true - } -} - -// BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. -func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) { - pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 }) - if pos >= len(x) || cmp(x[pos], target) != 0 { - return pos, false - } else { - return pos, true - } -} - -func search(n int, f func(int) bool) int { - // Define f(-1) == false and f(n) == true. 
- // Invariant: f(i-1) == false, f(j) == true. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if !f(h) { - i = h + 1 // preserves f(i-1) == false - } else { - j = h // preserves f(j) == true - } - } - // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. - return i -} - -type sortedHint int // hint for pdqsort when choosing the pivot - -const ( - unknownHint sortedHint = iota - increasingHint - decreasingHint -) - -// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf -type xorshift uint64 - -func (r *xorshift) Next() uint64 { - *r ^= *r << 13 - *r ^= *r >> 17 - *r ^= *r << 5 - return uint64(*r) -} - -func nextPowerOfTwo(length int) uint { - return 1 << bits.Len(uint(length)) -} +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// Sort may fail to sort correctly when sorting slices of floating-point +// numbers containing Not-a-number (NaN) values. +// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) +// instead if the input may contain NaNs. +func Sort[E constraints.Ordered](x []E) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the less function. +// This sort is not guaranteed to be stable. +// +// SortFunc requires that less is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[E any](x []E, less func(a, b E) bool) { + n := len(x) + pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) +} + +// SortStable sorts the slice x while keeping the original order of equal +// elements, using less to compare elements. +func SortStableFunc[E any](x []E, less func(a, b E) bool) { + stableLessFunc(x, len(x), less) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[E constraints.Ordered](x []E) bool { + for i := len(x) - 1; i > 0; i-- { + if x[i] < x[i-1] { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with less as the +// comparison function. +func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { + for i := len(x) - 1; i > 0; i-- { + if less(x[i], x[i-1]) { + return false + } + } + return true +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { + // search returns the leftmost position where f returns true, or len(x) if f + // returns false for all x. This is the insertion position for target in x, + // and could point to an element that's either == target or not. + pos := search(len(x), func(i int) bool { return x[i] >= target }) + if pos >= len(x) || x[pos] != target { + return pos, false + } else { + return pos, true + } +} + +// BinarySearchFunc works like BinarySearch, but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" is +// defined by cmp. 
cmp(a, b) is expected to return an integer comparing the two +// parameters: 0 if a == b, a negative number if a < b and a positive number if +// a > b. +func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) { + pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 }) + if pos >= len(x) || cmp(x[pos], target) != 0 { + return pos, false + } else { + return pos, true + } +} + +func search(n int, f func(int) bool) int { + // Define f(-1) == false and f(n) == true. + // Invariant: f(i-1) == false, f(j) == true. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if !f(h) { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. + return i +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go index 2a632476c5..45160e3388 100644 --- a/vendor/golang.org/x/exp/slices/zsortfunc.go +++ b/vendor/golang.org/x/exp/slices/zsortfunc.go @@ -1,479 +1,479 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -// insertionSortLessFunc sorts data[a:b] using insertion sort. -func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - for i := a + 1; i < b; i++ { - for j := i; j > a && less(data[j], data[j-1]); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownLessFunc implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. -func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && less(data[first+child], data[first+child+1]) { - child++ - } - if !less(data[first+root], data[first+child]) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownLessFunc(data, i, hi, first, less) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownLessFunc(data, lo, i, first, less) - } -} - -// pdqsortLessFunc sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. 
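A usage sketch of the public entry points above: `Sort` runs the pdqsort path, and `BinarySearch` reports both the insertion position and whether the target was found:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	x := []int{5, 2, 8, 2}

	slices.Sort(x) // pdqsort under the hood: [2 2 5 8]

	// BinarySearch returns the insertion position plus a found flag.
	pos, found := slices.BinarySearch(x, 5)
	fmt.Println(pos, found) // 2 true

	pos, found = slices.BinarySearch(x, 3)
	fmt.Println(pos, found) // 2 false: 3 would be inserted at index 2
}
```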
-func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortLessFunc(data, a, b, less) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortLessFunc(data, a, b, less) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsLessFunc(data, a, b, less) - limit-- - } - - pivot, hint := choosePivotLessFunc(data, a, b, less) - if hint == decreasingHint { - reverseRangeLessFunc(data, a, b, less) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortLessFunc(data, a, b, less) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !less(data[a-1], data[pivot]) { - mid := partitionEqualLessFunc(data, a, b, pivot, less) - a = mid - continue - } - - mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortLessFunc(data, a, mid, limit, less) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortLessFunc(data, mid+1, b, limit, less) - b = mid - } - } -} - -// partitionLessFunc does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. -// On return, data[newpivot] = p -func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && less(data[i], data[a]) { - i++ - } - for i <= j && !less(data[j], data[a]) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && less(data[i], data[a]) { - i++ - } - for i <= j && !less(data[j], data[a]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !less(data[a], data[i]) { - i++ - } - for i <= j && less(data[a], data[j]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !less(data[i], data[i-1]) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !less(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !less(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotLessFunc chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method. 
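The less function passed into this machinery must be a strict weak ordering; plain `<` on float64 breaks that once NaNs appear. A sketch using the comparator the package documentation suggests:

```go
package main

import (
	"fmt"
	"math"

	"golang.org/x/exp/slices"
)

func main() {
	x := []float64{3, math.NaN(), 1, math.NaN(), 2}

	// Plain `<` is not a strict weak ordering once NaNs are involved; the
	// package doc suggests ordering NaNs before everything else instead.
	slices.SortFunc(x, func(a, b float64) bool {
		return a < b || (math.IsNaN(a) && !math.IsNaN(b))
	})
	fmt.Println(x) // [NaN NaN 1 2 3]
}
```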
-func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentLessFunc(data, i, &swaps, less) - j = medianAdjacentLessFunc(data, j, &swaps, less) - k = medianAdjacentLessFunc(data, k, &swaps, less) - } - // Find the median among i, j, k and stores it into j. - j = medianLessFunc(data, i, j, k, &swaps, less) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { - if less(data[b], data[a]) { - *swaps++ - return b, a - } - return a, b -} - -// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { - a, b = order2LessFunc(data, a, b, swaps, less) - b, c = order2LessFunc(data, b, c, swaps, less) - a, b = order2LessFunc(data, a, b, swaps, less) - return b -} - -// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { - return medianLessFunc(data, a-1, a, a+1, swaps, less) -} - -func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortLessFunc(data, a, b, less) - a = b - b += blockSize - } - insertionSortLessFunc(data, a, n, less) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeLessFunc(data, a, a+blockSize, b, less) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeLessFunc(data, a, m, n, less) - } - blockSize *= 2 - } -} - -// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. 
-// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. -func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if less(data[h], data[a]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !less(data[m], data[h]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !less(data[p-c], data[c]) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateLessFunc(data, start, m, end, less) - } - if a < start && start < mid { - symMergeLessFunc(data, a, start, mid, less) - } - if mid < end && end < b { - symMergeLessFunc(data, mid, end, b, less) - } -} - -// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeLessFunc(data, m-i, m, j, less) - i -= j - } else { - swapRangeLessFunc(data, m-i, m+j-i, i, less) - j -= i - } - } - // i == j - swapRangeLessFunc(data, m-i, m, i, less) -} +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortLessFunc sorts data[a:b] using insertion sort. +func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + for i := a + 1; i < b; i++ { + for j := i; j > a && less(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownLessFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
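What the symMerge-based stable path buys in practice: equal elements keep their original relative order. A minimal sketch with the vendored `SortStableFunc` (less-based signature):

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type task struct {
	priority int
	name     string
}

func main() {
	ts := []task{{2, "b"}, {1, "a"}, {2, "c"}, {1, "d"}}

	// Equal elements keep their original order: "b" stays before "c",
	// and "a" before "d". That stability is what symMerge provides.
	slices.SortStableFunc(ts, func(x, y task) bool {
		return x.priority < y.priority
	})
	fmt.Println(ts) // [{1 a} {1 d} {2 b} {2 c}]
}
```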
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && less(data[first+child], data[first+child+1]) { + child++ + } + if !less(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownLessFunc(data, i, hi, first, less) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownLessFunc(data, lo, i, first, less) + } +} + +// pdqsortLessFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortLessFunc(data, a, b, less) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortLessFunc(data, a, b, less) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsLessFunc(data, a, b, less) + limit-- + } + + pivot, hint := choosePivotLessFunc(data, a, b, less) + if hint == decreasingHint { + reverseRangeLessFunc(data, a, b, less) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortLessFunc(data, a, b, less) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !less(data[a-1], data[pivot]) { + mid := partitionEqualLessFunc(data, a, b, pivot, less) + a = mid + continue + } + + mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortLessFunc(data, a, mid, limit, less) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortLessFunc(data, mid+1, b, limit, less) + b = mid + } + } +} + +// partitionLessFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. +// On return, data[newpivot] = p +func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !less(data[a], data[i]) { + i++ + } + for i <= j && less(data[a], data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !less(data[i], data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotLessFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. 
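A toy illustration of the median-of-three idea that `choosePivotLessFunc` applies at the quarter points; `median3` is a simplified stand-in for the order2/median helpers, not the generated code itself:

```go
package main

import "fmt"

// median3 returns the index holding the median of data[a], data[b], data[c],
// using the same three pairwise orderings as the medianLessFunc helper above.
func median3(data []int, a, b, c int) int {
	if data[b] < data[a] {
		a, b = b, a
	}
	if data[c] < data[b] {
		b, c = c, b
	}
	if data[b] < data[a] {
		a, b = b, a
	}
	return b
}

func main() {
	data := []int{9, 1, 5}
	fmt.Println(median3(data, 0, 1, 2)) // 2: data[2] == 5 is the median
}
```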
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentLessFunc(data, i, &swaps, less) + j = medianAdjacentLessFunc(data, j, &swaps, less) + k = medianAdjacentLessFunc(data, k, &swaps, less) + } + // Find the median among i, j, k and stores it into j. + j = medianLessFunc(data, i, j, k, &swaps, less) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { + if less(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { + a, b = order2LessFunc(data, a, b, swaps, less) + b, c = order2LessFunc(data, b, c, swaps, less) + a, b = order2LessFunc(data, a, b, swaps, less) + return b +} + +// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { + return medianLessFunc(data, a-1, a, a+1, swaps, less) +} + +func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortLessFunc(data, a, b, less) + a = b + b += blockSize + } + insertionSortLessFunc(data, a, n, less) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeLessFunc(data, a, a+blockSize, b, less) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeLessFunc(data, a, m, n, less) + } + blockSize *= 2 + } +} + +// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. 
+// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if less(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !less(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !less(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateLessFunc(data, start, m, end, less) + } + if a < start && start < mid { + symMergeLessFunc(data, a, start, mid, less) + } + if mid < end && end < b { + symMergeLessFunc(data, mid, end, b, less) + } +} + +// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeLessFunc(data, m-i, m, j, less) + i -= j + } else { + swapRangeLessFunc(data, m-i, m+j-i, i, less) + j -= i + } + } + // i == j + swapRangeLessFunc(data, m-i, m, i, less) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go index efaa1c8b71..73f6d27590 100644 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -1,481 +1,481 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// insertionSortOrdered sorts data[a:b] using insertion sort. -func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j] < data[j-1]); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownOrdered implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. 
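A toy version of the block rotation that `rotateLessFunc` performs, showing the 'x u v y' to 'x v u y' transformation on a concrete slice; `rotate` and `swapRange` here are simplified int-only stand-ins:

```go
package main

import "fmt"

// rotate swaps the consecutive blocks data[a:m] and data[m:b] in place,
// shrinking the larger block by the size of the smaller one each step.
func rotate(data []int, a, m, b int) {
	i, j := m-a, b-m
	for i != j {
		if i > j {
			swapRange(data, m-i, m, j)
			i -= j
		} else {
			swapRange(data, m-i, m+j-i, i)
			j -= i
		}
	}
	swapRange(data, m-i, m, i)
}

// swapRange exchanges data[a:a+n] and data[b:b+n] element by element.
func swapRange(data []int, a, b, n int) {
	for k := 0; k < n; k++ {
		data[a+k], data[b+k] = data[b+k], data[a+k]
	}
}

func main() {
	data := []int{0, 1, 2, 3, 4, 5} // u = [1 2], v = [3 4]
	rotate(data, 1, 3, 5)
	fmt.Println(data) // [0 3 4 1 2 5]
}
```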
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (data[first+child] < data[first+child+1]) { - child++ - } - if !(data[first+root] < data[first+child]) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownOrdered(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownOrdered(data, lo, i, first) - } -} - -// pdqsortOrdered sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortOrdered(data, a, b) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortOrdered(data, a, b) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsOrdered(data, a, b) - limit-- - } - - pivot, hint := choosePivotOrdered(data, a, b) - if hint == decreasingHint { - reverseRangeOrdered(data, a, b) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortOrdered(data, a, b) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !(data[a-1] < data[pivot]) { - mid := partitionEqualOrdered(data, a, b, pivot) - a = mid - continue - } - - mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortOrdered(data, a, mid, limit) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortOrdered(data, mid+1, b, limit) - b = mid - } - } -} - -// partitionOrdered does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for i <= j && (data[i] < data[a]) {
- i++
- }
- for i <= j && !(data[j] < data[a]) {
- j--
- }
- if i > j {
- data[j], data[a] = data[a], data[j]
- return j, true
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
-
- for {
- for i <= j && (data[i] < data[a]) {
- i++
- }
- for i <= j && !(data[j] < data[a]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- data[j], data[a] = data[a], data[j]
- return j, false
-}
-
-// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for {
- for i <= j && !(data[a] < data[i]) {
- i++
- }
- for i <= j && (data[a] < data[j]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- return i
-}
-
-// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
- const (
- maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
- shortestShifting = 50 // don't shift any elements on short arrays
- )
- i := a + 1
- for j := 0; j < maxSteps; j++ {
- for i < b && !(data[i] < data[i-1]) {
- i++
- }
-
- if i == b {
- return true
- }
-
- if b-a < shortestShifting {
- return false
- }
-
- data[i], data[i-1] = data[i-1], data[i]
-
- // Shift the smaller one to the left.
- if i-a >= 2 {
- for j := i - 1; j >= 1; j-- {
- if !(data[j] < data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- // Shift the greater one to the right.
- if b-i >= 2 {
- for j := i + 1; j < b; j++ {
- if !(data[j] < data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- }
- return false
-}
-
-// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
- length := b - a
- if length >= 8 {
- random := xorshift(length)
- modulus := nextPowerOfTwo(length)
-
- for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
- other := int(uint(random.Next()) & (modulus - 1))
- if other >= length {
- other -= length
- }
- data[idx], data[a+other] = data[a+other], data[idx]
- }
- }
-}
-
-// choosePivotOrdered chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
- const (
- shortestNinther = 50
- maxSwaps = 4 * 3
- )
-
- l := b - a
-
- var (
- swaps int
- i = a + l/4*1
- j = a + l/4*2
- k = a + l/4*3
- )
-
- if l >= 8 {
- if l >= shortestNinther {
- // Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentOrdered(data, i, &swaps) - j = medianAdjacentOrdered(data, j, &swaps) - k = medianAdjacentOrdered(data, k, &swaps) - } - // Find the median among i, j, k and stores it into j. - j = medianOrdered(data, i, j, k, &swaps) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if data[b] < data[a] { - *swaps++ - return b, a - } - return a, b -} - -// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { - a, b = order2Ordered(data, a, b, swaps) - b, c = order2Ordered(data, b, c, swaps) - a, b = order2Ordered(data, a, b, swaps) - return b -} - -// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { - return medianOrdered(data, a-1, a, a+1, swaps) -} - -func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableOrdered[E constraints.Ordered](data []E, n int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortOrdered(data, a, b) - a = b - b += blockSize - } - insertionSortOrdered(data, a, n) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeOrdered(data, a, a+blockSize, b) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeOrdered(data, a, m, n) - } - blockSize *= 2 - } -} - -// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. -func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. 
- i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if data[h] < data[a] { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !(data[m] < data[h]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !(data[p-c] < data[c]) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateOrdered(data, start, m, end) - } - if a < start && start < mid { - symMergeOrdered(data, a, start, mid) - } - if mid < end && end < b { - symMergeOrdered(data, mid, end, b) - } -} - -// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeOrdered(data, m-i, m, j) - i -= j - } else { - swapRangeOrdered(data, m-i, m+j-i, i) - j -= i - } - } - // i == j - swapRangeOrdered(data, m-i, m, i) -} +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j] < data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child] < data[first+child+1]) { + child++ + } + if !(data[first+root] < data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. 
+// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(data[a-1] < data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (data[i] < data[a]) {
+ i++
+ }
+ for i <= j && !(data[j] < data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(data[a] < data[i]) {
+ i++
+ }
+ for i <= j && (data[a] < data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(data[i] < data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(data[j] < data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if data[b] < data[a] { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. 
+ i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if data[h] < data[a] { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(data[m] < data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(data[p-c] < data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 9fefa57685..829f927366 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,42 +1,42 @@ -# github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 -## explicit; go 1.18 -github.com/antlr/antlr4/runtime/Go/antlr/v4 -# github.com/go-chi/chi v1.5.4 -## explicit; go 1.16 -github.com/go-chi/chi -# github.com/go-chi/cors v1.2.1 -## explicit; go 1.14 -github.com/go-chi/cors -# github.com/google/uuid v1.3.0 -## explicit -github.com/google/uuid -# github.com/joho/godotenv v1.5.1 -## explicit; go 1.12 -github.com/joho/godotenv -# github.com/klauspost/compress v1.15.15 -## explicit; go 1.17 -github.com/klauspost/compress/flate -# github.com/libsql/sqlite-antlr4-parser v0.0.0-20230802215326-5cb5bb604475 -## explicit; go 1.19 -github.com/libsql/sqlite-antlr4-parser/sqliteparser -github.com/libsql/sqlite-antlr4-parser/sqliteparserutils -# github.com/tursodatabase/libsql-client-go v0.0.0-20240220085343-4ae0eb9d0898 -## explicit; go 1.20 -github.com/tursodatabase/libsql-client-go/libsql -github.com/tursodatabase/libsql-client-go/libsql/internal/hrana -github.com/tursodatabase/libsql-client-go/libsql/internal/http -github.com/tursodatabase/libsql-client-go/libsql/internal/http/hranaV2 -github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared -github.com/tursodatabase/libsql-client-go/libsql/internal/ws -# golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e -## explicit; go 1.18 -golang.org/x/exp/constraints -golang.org/x/exp/slices -# nhooyr.io/websocket v1.8.7 -## explicit; go 1.13 -nhooyr.io/websocket 
-nhooyr.io/websocket/internal/bpool -nhooyr.io/websocket/internal/errd -nhooyr.io/websocket/internal/wsjs -nhooyr.io/websocket/internal/xsync -nhooyr.io/websocket/wsjson +# github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 +## explicit; go 1.18 +github.com/antlr/antlr4/runtime/Go/antlr/v4 +# github.com/go-chi/chi v1.5.4 +## explicit; go 1.16 +github.com/go-chi/chi +# github.com/go-chi/cors v1.2.1 +## explicit; go 1.14 +github.com/go-chi/cors +# github.com/google/uuid v1.3.0 +## explicit +github.com/google/uuid +# github.com/joho/godotenv v1.5.1 +## explicit; go 1.12 +github.com/joho/godotenv +# github.com/klauspost/compress v1.15.15 +## explicit; go 1.17 +github.com/klauspost/compress/flate +# github.com/libsql/sqlite-antlr4-parser v0.0.0-20230802215326-5cb5bb604475 +## explicit; go 1.19 +github.com/libsql/sqlite-antlr4-parser/sqliteparser +github.com/libsql/sqlite-antlr4-parser/sqliteparserutils +# github.com/tursodatabase/libsql-client-go v0.0.0-20240220085343-4ae0eb9d0898 +## explicit; go 1.20 +github.com/tursodatabase/libsql-client-go/libsql +github.com/tursodatabase/libsql-client-go/libsql/internal/hrana +github.com/tursodatabase/libsql-client-go/libsql/internal/http +github.com/tursodatabase/libsql-client-go/libsql/internal/http/hranaV2 +github.com/tursodatabase/libsql-client-go/libsql/internal/http/shared +github.com/tursodatabase/libsql-client-go/libsql/internal/ws +# golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e +## explicit; go 1.18 +golang.org/x/exp/constraints +golang.org/x/exp/slices +# nhooyr.io/websocket v1.8.7 +## explicit; go 1.13 +nhooyr.io/websocket +nhooyr.io/websocket/internal/bpool +nhooyr.io/websocket/internal/errd +nhooyr.io/websocket/internal/wsjs +nhooyr.io/websocket/internal/xsync +nhooyr.io/websocket/wsjson diff --git a/vendor/nhooyr.io/websocket/.gitignore b/vendor/nhooyr.io/websocket/.gitignore index 6961e5c894..90a9d3a333 100644 --- a/vendor/nhooyr.io/websocket/.gitignore +++ b/vendor/nhooyr.io/websocket/.gitignore @@ -1 +1 @@ -websocket.test +websocket.test diff --git a/vendor/nhooyr.io/websocket/LICENSE.txt b/vendor/nhooyr.io/websocket/LICENSE.txt index b5b5fef31f..88c860f517 100644 --- a/vendor/nhooyr.io/websocket/LICENSE.txt +++ b/vendor/nhooyr.io/websocket/LICENSE.txt @@ -1,21 +1,21 @@ -MIT License - -Copyright (c) 2018 Anmol Sethi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
+MIT License + +Copyright (c) 2018 Anmol Sethi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/nhooyr.io/websocket/README.md b/vendor/nhooyr.io/websocket/README.md index df20c581a5..86defd527c 100644 --- a/vendor/nhooyr.io/websocket/README.md +++ b/vendor/nhooyr.io/websocket/README.md @@ -1,132 +1,132 @@ -# websocket - -[![godoc](https://godoc.org/nhooyr.io/websocket?status.svg)](https://pkg.go.dev/nhooyr.io/websocket) -[![coverage](https://img.shields.io/badge/coverage-88%25-success)](https://nhooyrio-websocket-coverage.netlify.app) - -websocket is a minimal and idiomatic WebSocket library for Go. - -## Install - -```bash -go get nhooyr.io/websocket -``` - -## Highlights - -- Minimal and idiomatic API -- First class [context.Context](https://blog.golang.org/context) support -- Fully passes the WebSocket [autobahn-testsuite](https://github.com/crossbario/autobahn-testsuite) -- [Single dependency](https://pkg.go.dev/nhooyr.io/websocket?tab=imports) -- JSON and protobuf helpers in the [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages -- Zero alloc reads and writes -- Concurrent writes -- [Close handshake](https://pkg.go.dev/nhooyr.io/websocket#Conn.Close) -- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper -- [Ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API -- [RFC 7692](https://tools.ietf.org/html/rfc7692) permessage-deflate compression -- Compile to [Wasm](https://pkg.go.dev/nhooyr.io/websocket#hdr-Wasm) - -## Roadmap - -- [ ] HTTP/2 [#4](https://github.com/nhooyr/websocket/issues/4) - -## Examples - -For a production quality example that demonstrates the complete API, see the -[echo example](./examples/echo). - -For a full stack example, see the [chat example](./examples/chat). - -### Server - -```go -http.HandlerFunc(func (w http.ResponseWriter, r *http.Request) { - c, err := websocket.Accept(w, r, nil) - if err != nil { - // ... - } - defer c.Close(websocket.StatusInternalError, "the sky is falling") - - ctx, cancel := context.WithTimeout(r.Context(), time.Second*10) - defer cancel() - - var v interface{} - err = wsjson.Read(ctx, c, &v) - if err != nil { - // ... - } - - log.Printf("received: %v", v) - - c.Close(websocket.StatusNormalClosure, "") -}) -``` - -### Client - -```go -ctx, cancel := context.WithTimeout(context.Background(), time.Minute) -defer cancel() - -c, _, err := websocket.Dial(ctx, "ws://localhost:8080", nil) -if err != nil { - // ... 
-} -defer c.Close(websocket.StatusInternalError, "the sky is falling") - -err = wsjson.Write(ctx, c, "hi") -if err != nil { - // ... -} - -c.Close(websocket.StatusNormalClosure, "") -``` - -## Comparison - -### gorilla/websocket - -Advantages of [gorilla/websocket](https://github.com/gorilla/websocket): - -- Mature and widely used -- [Prepared writes](https://pkg.go.dev/github.com/gorilla/websocket#PreparedMessage) -- Configurable [buffer sizes](https://pkg.go.dev/github.com/gorilla/websocket#hdr-Buffers) - -Advantages of nhooyr.io/websocket: - -- Minimal and idiomatic API - - Compare godoc of [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) with [gorilla/websocket](https://pkg.go.dev/github.com/gorilla/websocket) side by side. -- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper -- Zero alloc reads and writes ([gorilla/websocket#535](https://github.com/gorilla/websocket/issues/535)) -- Full [context.Context](https://blog.golang.org/context) support -- Dial uses [net/http.Client](https://golang.org/pkg/net/http/#Client) - - Will enable easy HTTP/2 support in the future - - Gorilla writes directly to a net.Conn and so duplicates features of net/http.Client. -- Concurrent writes -- Close handshake ([gorilla/websocket#448](https://github.com/gorilla/websocket/issues/448)) -- Idiomatic [ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API - - Gorilla requires registering a pong callback before sending a Ping -- Can target Wasm ([gorilla/websocket#432](https://github.com/gorilla/websocket/issues/432)) -- Transparent message buffer reuse with [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages -- [1.75x](https://github.com/nhooyr/websocket/releases/tag/v1.7.4) faster WebSocket masking implementation in pure Go - - Gorilla's implementation is slower and uses [unsafe](https://golang.org/pkg/unsafe/). -- Full [permessage-deflate](https://tools.ietf.org/html/rfc7692) compression extension support - - Gorilla only supports no context takeover mode - - We use [klauspost/compress](https://github.com/klauspost/compress) for much lower memory usage ([gorilla/websocket#203](https://github.com/gorilla/websocket/issues/203)) -- [CloseRead](https://pkg.go.dev/nhooyr.io/websocket#Conn.CloseRead) helper ([gorilla/websocket#492](https://github.com/gorilla/websocket/issues/492)) -- Actively maintained ([gorilla/websocket#370](https://github.com/gorilla/websocket/issues/370)) - -#### golang.org/x/net/websocket - -[golang.org/x/net/websocket](https://pkg.go.dev/golang.org/x/net/websocket) is deprecated. -See [golang/go/issues/18152](https://github.com/golang/go/issues/18152). - -The [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) can help in transitioning -to nhooyr.io/websocket. - -#### gobwas/ws - -[gobwas/ws](https://github.com/gobwas/ws) has an extremely flexible API that allows it to be used -in an event driven style for performance. See the author's [blog post](https://medium.freecodecamp.org/million-websockets-and-go-cc58418460bb). - -However when writing idiomatic Go, nhooyr.io/websocket will be faster and easier to use. +# websocket + +[![godoc](https://godoc.org/nhooyr.io/websocket?status.svg)](https://pkg.go.dev/nhooyr.io/websocket) +[![coverage](https://img.shields.io/badge/coverage-88%25-success)](https://nhooyrio-websocket-coverage.netlify.app) + +websocket is a minimal and idiomatic WebSocket library for Go. 
+ +## Install + +```bash +go get nhooyr.io/websocket +``` + +## Highlights + +- Minimal and idiomatic API +- First class [context.Context](https://blog.golang.org/context) support +- Fully passes the WebSocket [autobahn-testsuite](https://github.com/crossbario/autobahn-testsuite) +- [Single dependency](https://pkg.go.dev/nhooyr.io/websocket?tab=imports) +- JSON and protobuf helpers in the [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages +- Zero alloc reads and writes +- Concurrent writes +- [Close handshake](https://pkg.go.dev/nhooyr.io/websocket#Conn.Close) +- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper +- [Ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API +- [RFC 7692](https://tools.ietf.org/html/rfc7692) permessage-deflate compression +- Compile to [Wasm](https://pkg.go.dev/nhooyr.io/websocket#hdr-Wasm) + +## Roadmap + +- [ ] HTTP/2 [#4](https://github.com/nhooyr/websocket/issues/4) + +## Examples + +For a production quality example that demonstrates the complete API, see the +[echo example](./examples/echo). + +For a full stack example, see the [chat example](./examples/chat). + +### Server + +```go +http.HandlerFunc(func (w http.ResponseWriter, r *http.Request) { + c, err := websocket.Accept(w, r, nil) + if err != nil { + // ... + } + defer c.Close(websocket.StatusInternalError, "the sky is falling") + + ctx, cancel := context.WithTimeout(r.Context(), time.Second*10) + defer cancel() + + var v interface{} + err = wsjson.Read(ctx, c, &v) + if err != nil { + // ... + } + + log.Printf("received: %v", v) + + c.Close(websocket.StatusNormalClosure, "") +}) +``` + +### Client + +```go +ctx, cancel := context.WithTimeout(context.Background(), time.Minute) +defer cancel() + +c, _, err := websocket.Dial(ctx, "ws://localhost:8080", nil) +if err != nil { + // ... +} +defer c.Close(websocket.StatusInternalError, "the sky is falling") + +err = wsjson.Write(ctx, c, "hi") +if err != nil { + // ... +} + +c.Close(websocket.StatusNormalClosure, "") +``` + +## Comparison + +### gorilla/websocket + +Advantages of [gorilla/websocket](https://github.com/gorilla/websocket): + +- Mature and widely used +- [Prepared writes](https://pkg.go.dev/github.com/gorilla/websocket#PreparedMessage) +- Configurable [buffer sizes](https://pkg.go.dev/github.com/gorilla/websocket#hdr-Buffers) + +Advantages of nhooyr.io/websocket: + +- Minimal and idiomatic API + - Compare godoc of [nhooyr.io/websocket](https://pkg.go.dev/nhooyr.io/websocket) with [gorilla/websocket](https://pkg.go.dev/github.com/gorilla/websocket) side by side. +- [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) wrapper +- Zero alloc reads and writes ([gorilla/websocket#535](https://github.com/gorilla/websocket/issues/535)) +- Full [context.Context](https://blog.golang.org/context) support +- Dial uses [net/http.Client](https://golang.org/pkg/net/http/#Client) + - Will enable easy HTTP/2 support in the future + - Gorilla writes directly to a net.Conn and so duplicates features of net/http.Client. 
+- Concurrent writes +- Close handshake ([gorilla/websocket#448](https://github.com/gorilla/websocket/issues/448)) +- Idiomatic [ping pong](https://pkg.go.dev/nhooyr.io/websocket#Conn.Ping) API + - Gorilla requires registering a pong callback before sending a Ping +- Can target Wasm ([gorilla/websocket#432](https://github.com/gorilla/websocket/issues/432)) +- Transparent message buffer reuse with [wsjson](https://pkg.go.dev/nhooyr.io/websocket/wsjson) and [wspb](https://pkg.go.dev/nhooyr.io/websocket/wspb) subpackages +- [1.75x](https://github.com/nhooyr/websocket/releases/tag/v1.7.4) faster WebSocket masking implementation in pure Go + - Gorilla's implementation is slower and uses [unsafe](https://golang.org/pkg/unsafe/). +- Full [permessage-deflate](https://tools.ietf.org/html/rfc7692) compression extension support + - Gorilla only supports no context takeover mode + - We use [klauspost/compress](https://github.com/klauspost/compress) for much lower memory usage ([gorilla/websocket#203](https://github.com/gorilla/websocket/issues/203)) +- [CloseRead](https://pkg.go.dev/nhooyr.io/websocket#Conn.CloseRead) helper ([gorilla/websocket#492](https://github.com/gorilla/websocket/issues/492)) +- Actively maintained ([gorilla/websocket#370](https://github.com/gorilla/websocket/issues/370)) + +#### golang.org/x/net/websocket + +[golang.org/x/net/websocket](https://pkg.go.dev/golang.org/x/net/websocket) is deprecated. +See [golang/go/issues/18152](https://github.com/golang/go/issues/18152). + +The [net.Conn](https://pkg.go.dev/nhooyr.io/websocket#NetConn) can help in transitioning +to nhooyr.io/websocket. + +#### gobwas/ws + +[gobwas/ws](https://github.com/gobwas/ws) has an extremely flexible API that allows it to be used +in an event driven style for performance. See the author's [blog post](https://medium.freecodecamp.org/million-websockets-and-go-cc58418460bb). + +However when writing idiomatic Go, nhooyr.io/websocket will be faster and easier to use. diff --git a/vendor/nhooyr.io/websocket/accept.go b/vendor/nhooyr.io/websocket/accept.go index 18536bdb2c..06ea7f5c0e 100644 --- a/vendor/nhooyr.io/websocket/accept.go +++ b/vendor/nhooyr.io/websocket/accept.go @@ -1,370 +1,370 @@ -// +build !js - -package websocket - -import ( - "bytes" - "crypto/sha1" - "encoding/base64" - "errors" - "fmt" - "io" - "log" - "net/http" - "net/textproto" - "net/url" - "path/filepath" - "strings" - - "nhooyr.io/websocket/internal/errd" -) - -// AcceptOptions represents Accept's options. -type AcceptOptions struct { - // Subprotocols lists the WebSocket subprotocols that Accept will negotiate with the client. - // The empty subprotocol will always be negotiated as per RFC 6455. If you would like to - // reject it, close the connection when c.Subprotocol() == "". - Subprotocols []string - - // InsecureSkipVerify is used to disable Accept's origin verification behaviour. - // - // You probably want to use OriginPatterns instead. - InsecureSkipVerify bool - - // OriginPatterns lists the host patterns for authorized origins. - // The request host is always authorized. - // Use this to enable cross origin WebSockets. - // - // i.e javascript running on example.com wants to access a WebSocket server at chat.example.com. - // In such a case, example.com is the origin and chat.example.com is the request host. - // One would set this field to []string{"example.com"} to authorize example.com to connect. - // - // Each pattern is matched case insensitively against the request origin host - // with filepath.Match. 
- // See https://golang.org/pkg/path/filepath/#Match - // - // Please ensure you understand the ramifications of enabling this. - // If used incorrectly your WebSocket server will be open to CSRF attacks. - // - // Do not use * as a pattern to allow any origin, prefer to use InsecureSkipVerify instead - // to bring attention to the danger of such a setting. - OriginPatterns []string - - // CompressionMode controls the compression mode. - // Defaults to CompressionNoContextTakeover. - // - // See docs on CompressionMode for details. - CompressionMode CompressionMode - - // CompressionThreshold controls the minimum size of a message before compression is applied. - // - // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes - // for CompressionContextTakeover. - CompressionThreshold int -} - -// Accept accepts a WebSocket handshake from a client and upgrades the -// the connection to a WebSocket. -// -// Accept will not allow cross origin requests by default. -// See the InsecureSkipVerify and OriginPatterns options to allow cross origin requests. -// -// Accept will write a response to w on all errors. -func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) { - return accept(w, r, opts) -} - -func accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (_ *Conn, err error) { - defer errd.Wrap(&err, "failed to accept WebSocket connection") - - if opts == nil { - opts = &AcceptOptions{} - } - opts = &*opts - - errCode, err := verifyClientRequest(w, r) - if err != nil { - http.Error(w, err.Error(), errCode) - return nil, err - } - - if !opts.InsecureSkipVerify { - err = authenticateOrigin(r, opts.OriginPatterns) - if err != nil { - if errors.Is(err, filepath.ErrBadPattern) { - log.Printf("websocket: %v", err) - err = errors.New(http.StatusText(http.StatusForbidden)) - } - http.Error(w, err.Error(), http.StatusForbidden) - return nil, err - } - } - - hj, ok := w.(http.Hijacker) - if !ok { - err = errors.New("http.ResponseWriter does not implement http.Hijacker") - http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented) - return nil, err - } - - w.Header().Set("Upgrade", "websocket") - w.Header().Set("Connection", "Upgrade") - - key := r.Header.Get("Sec-WebSocket-Key") - w.Header().Set("Sec-WebSocket-Accept", secWebSocketAccept(key)) - - subproto := selectSubprotocol(r, opts.Subprotocols) - if subproto != "" { - w.Header().Set("Sec-WebSocket-Protocol", subproto) - } - - copts, err := acceptCompression(r, w, opts.CompressionMode) - if err != nil { - return nil, err - } - - w.WriteHeader(http.StatusSwitchingProtocols) - // See https://github.com/nhooyr/websocket/issues/166 - if ginWriter, ok := w.(interface { - WriteHeaderNow() - }); ok { - ginWriter.WriteHeaderNow() - } - - netConn, brw, err := hj.Hijack() - if err != nil { - err = fmt.Errorf("failed to hijack connection: %w", err) - http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) - return nil, err - } - - // https://github.com/golang/go/issues/32314 - b, _ := brw.Reader.Peek(brw.Reader.Buffered()) - brw.Reader.Reset(io.MultiReader(bytes.NewReader(b), netConn)) - - return newConn(connConfig{ - subprotocol: w.Header().Get("Sec-WebSocket-Protocol"), - rwc: netConn, - client: false, - copts: copts, - flateThreshold: opts.CompressionThreshold, - - br: brw.Reader, - bw: brw.Writer, - }), nil -} - -func verifyClientRequest(w http.ResponseWriter, r *http.Request) (errCode int, _ error) { - if !r.ProtoAtLeast(1, 1) { - 
return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: handshake request must be at least HTTP/1.1: %q", r.Proto) - } - - if !headerContainsTokenIgnoreCase(r.Header, "Connection", "Upgrade") { - w.Header().Set("Connection", "Upgrade") - w.Header().Set("Upgrade", "websocket") - return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", r.Header.Get("Connection")) - } - - if !headerContainsTokenIgnoreCase(r.Header, "Upgrade", "websocket") { - w.Header().Set("Connection", "Upgrade") - w.Header().Set("Upgrade", "websocket") - return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", r.Header.Get("Upgrade")) - } - - if r.Method != "GET" { - return http.StatusMethodNotAllowed, fmt.Errorf("WebSocket protocol violation: handshake request method is not GET but %q", r.Method) - } - - if r.Header.Get("Sec-WebSocket-Version") != "13" { - w.Header().Set("Sec-WebSocket-Version", "13") - return http.StatusBadRequest, fmt.Errorf("unsupported WebSocket protocol version (only 13 is supported): %q", r.Header.Get("Sec-WebSocket-Version")) - } - - if r.Header.Get("Sec-WebSocket-Key") == "" { - return http.StatusBadRequest, errors.New("WebSocket protocol violation: missing Sec-WebSocket-Key") - } - - return 0, nil -} - -func authenticateOrigin(r *http.Request, originHosts []string) error { - origin := r.Header.Get("Origin") - if origin == "" { - return nil - } - - u, err := url.Parse(origin) - if err != nil { - return fmt.Errorf("failed to parse Origin header %q: %w", origin, err) - } - - if strings.EqualFold(r.Host, u.Host) { - return nil - } - - for _, hostPattern := range originHosts { - matched, err := match(hostPattern, u.Host) - if err != nil { - return fmt.Errorf("failed to parse filepath pattern %q: %w", hostPattern, err) - } - if matched { - return nil - } - } - return fmt.Errorf("request Origin %q is not authorized for Host %q", origin, r.Host) -} - -func match(pattern, s string) (bool, error) { - return filepath.Match(strings.ToLower(pattern), strings.ToLower(s)) -} - -func selectSubprotocol(r *http.Request, subprotocols []string) string { - cps := headerTokens(r.Header, "Sec-WebSocket-Protocol") - for _, sp := range subprotocols { - for _, cp := range cps { - if strings.EqualFold(sp, cp) { - return cp - } - } - } - return "" -} - -func acceptCompression(r *http.Request, w http.ResponseWriter, mode CompressionMode) (*compressionOptions, error) { - if mode == CompressionDisabled { - return nil, nil - } - - for _, ext := range websocketExtensions(r.Header) { - switch ext.name { - case "permessage-deflate": - return acceptDeflate(w, ext, mode) - // Disabled for now, see https://github.com/nhooyr/websocket/issues/218 - // case "x-webkit-deflate-frame": - // return acceptWebkitDeflate(w, ext, mode) - } - } - return nil, nil -} - -func acceptDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) { - copts := mode.opts() - - for _, p := range ext.params { - switch p { - case "client_no_context_takeover": - copts.clientNoContextTakeover = true - continue - case "server_no_context_takeover": - copts.serverNoContextTakeover = true - continue - } - - if strings.HasPrefix(p, "client_max_window_bits") { - // We cannot adjust the read sliding window so cannot make use of this. 
- continue - } - - err := fmt.Errorf("unsupported permessage-deflate parameter: %q", p) - http.Error(w, err.Error(), http.StatusBadRequest) - return nil, err - } - - copts.setHeader(w.Header()) - - return copts, nil -} - -func acceptWebkitDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) { - copts := mode.opts() - // The peer must explicitly request it. - copts.serverNoContextTakeover = false - - for _, p := range ext.params { - if p == "no_context_takeover" { - copts.serverNoContextTakeover = true - continue - } - - // We explicitly fail on x-webkit-deflate-frame's max_window_bits parameter instead - // of ignoring it as the draft spec is unclear. It says the server can ignore it - // but the server has no way of signalling to the client it was ignored as the parameters - // are set one way. - // Thus us ignoring it would make the client think we understood it which would cause issues. - // See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06#section-4.1 - // - // Either way, we're only implementing this for webkit which never sends the max_window_bits - // parameter so we don't need to worry about it. - err := fmt.Errorf("unsupported x-webkit-deflate-frame parameter: %q", p) - http.Error(w, err.Error(), http.StatusBadRequest) - return nil, err - } - - s := "x-webkit-deflate-frame" - if copts.clientNoContextTakeover { - s += "; no_context_takeover" - } - w.Header().Set("Sec-WebSocket-Extensions", s) - - return copts, nil -} - -func headerContainsTokenIgnoreCase(h http.Header, key, token string) bool { - for _, t := range headerTokens(h, key) { - if strings.EqualFold(t, token) { - return true - } - } - return false -} - -type websocketExtension struct { - name string - params []string -} - -func websocketExtensions(h http.Header) []websocketExtension { - var exts []websocketExtension - extStrs := headerTokens(h, "Sec-WebSocket-Extensions") - for _, extStr := range extStrs { - if extStr == "" { - continue - } - - vals := strings.Split(extStr, ";") - for i := range vals { - vals[i] = strings.TrimSpace(vals[i]) - } - - e := websocketExtension{ - name: vals[0], - params: vals[1:], - } - - exts = append(exts, e) - } - return exts -} - -func headerTokens(h http.Header, key string) []string { - key = textproto.CanonicalMIMEHeaderKey(key) - var tokens []string - for _, v := range h[key] { - v = strings.TrimSpace(v) - for _, t := range strings.Split(v, ",") { - t = strings.TrimSpace(t) - tokens = append(tokens, t) - } - } - return tokens -} - -var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") - -func secWebSocketAccept(secWebSocketKey string) string { - h := sha1.New() - h.Write([]byte(secWebSocketKey)) - h.Write(keyGUID) - - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} +// +build !js + +package websocket + +import ( + "bytes" + "crypto/sha1" + "encoding/base64" + "errors" + "fmt" + "io" + "log" + "net/http" + "net/textproto" + "net/url" + "path/filepath" + "strings" + + "nhooyr.io/websocket/internal/errd" +) + +// AcceptOptions represents Accept's options. +type AcceptOptions struct { + // Subprotocols lists the WebSocket subprotocols that Accept will negotiate with the client. + // The empty subprotocol will always be negotiated as per RFC 6455. If you would like to + // reject it, close the connection when c.Subprotocol() == "". + Subprotocols []string + + // InsecureSkipVerify is used to disable Accept's origin verification behaviour. 
+ // + // You probably want to use OriginPatterns instead. + InsecureSkipVerify bool + + // OriginPatterns lists the host patterns for authorized origins. + // The request host is always authorized. + // Use this to enable cross origin WebSockets. + // + // i.e javascript running on example.com wants to access a WebSocket server at chat.example.com. + // In such a case, example.com is the origin and chat.example.com is the request host. + // One would set this field to []string{"example.com"} to authorize example.com to connect. + // + // Each pattern is matched case insensitively against the request origin host + // with filepath.Match. + // See https://golang.org/pkg/path/filepath/#Match + // + // Please ensure you understand the ramifications of enabling this. + // If used incorrectly your WebSocket server will be open to CSRF attacks. + // + // Do not use * as a pattern to allow any origin, prefer to use InsecureSkipVerify instead + // to bring attention to the danger of such a setting. + OriginPatterns []string + + // CompressionMode controls the compression mode. + // Defaults to CompressionNoContextTakeover. + // + // See docs on CompressionMode for details. + CompressionMode CompressionMode + + // CompressionThreshold controls the minimum size of a message before compression is applied. + // + // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes + // for CompressionContextTakeover. + CompressionThreshold int +} + +// Accept accepts a WebSocket handshake from a client and upgrades the +// the connection to a WebSocket. +// +// Accept will not allow cross origin requests by default. +// See the InsecureSkipVerify and OriginPatterns options to allow cross origin requests. +// +// Accept will write a response to w on all errors. 
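A minimal sketch of a handler wiring up the `AcceptOptions` fields documented above; the route, subprotocol name, and origin pattern are hypothetical:

```go
package main

import (
	"log"
	"net/http"

	"nhooyr.io/websocket"
)

func main() {
	http.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
		c, err := websocket.Accept(w, r, &websocket.AcceptOptions{
			Subprotocols:    []string{"chat"},        // hypothetical subprotocol
			OriginPatterns:  []string{"example.com"}, // allow JS served from example.com
			CompressionMode: websocket.CompressionNoContextTakeover,
		})
		if err != nil {
			// Accept has already written an error response to w.
			return
		}
		defer c.Close(websocket.StatusInternalError, "unexpected shutdown")

		// ... read and write messages on c ...

		c.Close(websocket.StatusNormalClosure, "")
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```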
+func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) { + return accept(w, r, opts) +} + +func accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (_ *Conn, err error) { + defer errd.Wrap(&err, "failed to accept WebSocket connection") + + if opts == nil { + opts = &AcceptOptions{} + } + opts = &*opts + + errCode, err := verifyClientRequest(w, r) + if err != nil { + http.Error(w, err.Error(), errCode) + return nil, err + } + + if !opts.InsecureSkipVerify { + err = authenticateOrigin(r, opts.OriginPatterns) + if err != nil { + if errors.Is(err, filepath.ErrBadPattern) { + log.Printf("websocket: %v", err) + err = errors.New(http.StatusText(http.StatusForbidden)) + } + http.Error(w, err.Error(), http.StatusForbidden) + return nil, err + } + } + + hj, ok := w.(http.Hijacker) + if !ok { + err = errors.New("http.ResponseWriter does not implement http.Hijacker") + http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented) + return nil, err + } + + w.Header().Set("Upgrade", "websocket") + w.Header().Set("Connection", "Upgrade") + + key := r.Header.Get("Sec-WebSocket-Key") + w.Header().Set("Sec-WebSocket-Accept", secWebSocketAccept(key)) + + subproto := selectSubprotocol(r, opts.Subprotocols) + if subproto != "" { + w.Header().Set("Sec-WebSocket-Protocol", subproto) + } + + copts, err := acceptCompression(r, w, opts.CompressionMode) + if err != nil { + return nil, err + } + + w.WriteHeader(http.StatusSwitchingProtocols) + // See https://github.com/nhooyr/websocket/issues/166 + if ginWriter, ok := w.(interface { + WriteHeaderNow() + }); ok { + ginWriter.WriteHeaderNow() + } + + netConn, brw, err := hj.Hijack() + if err != nil { + err = fmt.Errorf("failed to hijack connection: %w", err) + http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) + return nil, err + } + + // https://github.com/golang/go/issues/32314 + b, _ := brw.Reader.Peek(brw.Reader.Buffered()) + brw.Reader.Reset(io.MultiReader(bytes.NewReader(b), netConn)) + + return newConn(connConfig{ + subprotocol: w.Header().Get("Sec-WebSocket-Protocol"), + rwc: netConn, + client: false, + copts: copts, + flateThreshold: opts.CompressionThreshold, + + br: brw.Reader, + bw: brw.Writer, + }), nil +} + +func verifyClientRequest(w http.ResponseWriter, r *http.Request) (errCode int, _ error) { + if !r.ProtoAtLeast(1, 1) { + return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: handshake request must be at least HTTP/1.1: %q", r.Proto) + } + + if !headerContainsTokenIgnoreCase(r.Header, "Connection", "Upgrade") { + w.Header().Set("Connection", "Upgrade") + w.Header().Set("Upgrade", "websocket") + return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", r.Header.Get("Connection")) + } + + if !headerContainsTokenIgnoreCase(r.Header, "Upgrade", "websocket") { + w.Header().Set("Connection", "Upgrade") + w.Header().Set("Upgrade", "websocket") + return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", r.Header.Get("Upgrade")) + } + + if r.Method != "GET" { + return http.StatusMethodNotAllowed, fmt.Errorf("WebSocket protocol violation: handshake request method is not GET but %q", r.Method) + } + + if r.Header.Get("Sec-WebSocket-Version") != "13" { + w.Header().Set("Sec-WebSocket-Version", "13") + return http.StatusBadRequest, fmt.Errorf("unsupported WebSocket protocol version (only 13 is 
supported): %q", r.Header.Get("Sec-WebSocket-Version")) + } + + if r.Header.Get("Sec-WebSocket-Key") == "" { + return http.StatusBadRequest, errors.New("WebSocket protocol violation: missing Sec-WebSocket-Key") + } + + return 0, nil +} + +func authenticateOrigin(r *http.Request, originHosts []string) error { + origin := r.Header.Get("Origin") + if origin == "" { + return nil + } + + u, err := url.Parse(origin) + if err != nil { + return fmt.Errorf("failed to parse Origin header %q: %w", origin, err) + } + + if strings.EqualFold(r.Host, u.Host) { + return nil + } + + for _, hostPattern := range originHosts { + matched, err := match(hostPattern, u.Host) + if err != nil { + return fmt.Errorf("failed to parse filepath pattern %q: %w", hostPattern, err) + } + if matched { + return nil + } + } + return fmt.Errorf("request Origin %q is not authorized for Host %q", origin, r.Host) +} + +func match(pattern, s string) (bool, error) { + return filepath.Match(strings.ToLower(pattern), strings.ToLower(s)) +} + +func selectSubprotocol(r *http.Request, subprotocols []string) string { + cps := headerTokens(r.Header, "Sec-WebSocket-Protocol") + for _, sp := range subprotocols { + for _, cp := range cps { + if strings.EqualFold(sp, cp) { + return cp + } + } + } + return "" +} + +func acceptCompression(r *http.Request, w http.ResponseWriter, mode CompressionMode) (*compressionOptions, error) { + if mode == CompressionDisabled { + return nil, nil + } + + for _, ext := range websocketExtensions(r.Header) { + switch ext.name { + case "permessage-deflate": + return acceptDeflate(w, ext, mode) + // Disabled for now, see https://github.com/nhooyr/websocket/issues/218 + // case "x-webkit-deflate-frame": + // return acceptWebkitDeflate(w, ext, mode) + } + } + return nil, nil +} + +func acceptDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) { + copts := mode.opts() + + for _, p := range ext.params { + switch p { + case "client_no_context_takeover": + copts.clientNoContextTakeover = true + continue + case "server_no_context_takeover": + copts.serverNoContextTakeover = true + continue + } + + if strings.HasPrefix(p, "client_max_window_bits") { + // We cannot adjust the read sliding window so cannot make use of this. + continue + } + + err := fmt.Errorf("unsupported permessage-deflate parameter: %q", p) + http.Error(w, err.Error(), http.StatusBadRequest) + return nil, err + } + + copts.setHeader(w.Header()) + + return copts, nil +} + +func acceptWebkitDeflate(w http.ResponseWriter, ext websocketExtension, mode CompressionMode) (*compressionOptions, error) { + copts := mode.opts() + // The peer must explicitly request it. + copts.serverNoContextTakeover = false + + for _, p := range ext.params { + if p == "no_context_takeover" { + copts.serverNoContextTakeover = true + continue + } + + // We explicitly fail on x-webkit-deflate-frame's max_window_bits parameter instead + // of ignoring it as the draft spec is unclear. It says the server can ignore it + // but the server has no way of signalling to the client it was ignored as the parameters + // are set one way. + // Thus us ignoring it would make the client think we understood it which would cause issues. + // See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06#section-4.1 + // + // Either way, we're only implementing this for webkit which never sends the max_window_bits + // parameter so we don't need to worry about it. 
+ err := fmt.Errorf("unsupported x-webkit-deflate-frame parameter: %q", p) + http.Error(w, err.Error(), http.StatusBadRequest) + return nil, err + } + + s := "x-webkit-deflate-frame" + if copts.clientNoContextTakeover { + s += "; no_context_takeover" + } + w.Header().Set("Sec-WebSocket-Extensions", s) + + return copts, nil +} + +func headerContainsTokenIgnoreCase(h http.Header, key, token string) bool { + for _, t := range headerTokens(h, key) { + if strings.EqualFold(t, token) { + return true + } + } + return false +} + +type websocketExtension struct { + name string + params []string +} + +func websocketExtensions(h http.Header) []websocketExtension { + var exts []websocketExtension + extStrs := headerTokens(h, "Sec-WebSocket-Extensions") + for _, extStr := range extStrs { + if extStr == "" { + continue + } + + vals := strings.Split(extStr, ";") + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + + e := websocketExtension{ + name: vals[0], + params: vals[1:], + } + + exts = append(exts, e) + } + return exts +} + +func headerTokens(h http.Header, key string) []string { + key = textproto.CanonicalMIMEHeaderKey(key) + var tokens []string + for _, v := range h[key] { + v = strings.TrimSpace(v) + for _, t := range strings.Split(v, ",") { + t = strings.TrimSpace(t) + tokens = append(tokens, t) + } + } + return tokens +} + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func secWebSocketAccept(secWebSocketKey string) string { + h := sha1.New() + h.Write([]byte(secWebSocketKey)) + h.Write(keyGUID) + + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} diff --git a/vendor/nhooyr.io/websocket/accept_js.go b/vendor/nhooyr.io/websocket/accept_js.go index daad4b79fe..9399ab6618 100644 --- a/vendor/nhooyr.io/websocket/accept_js.go +++ b/vendor/nhooyr.io/websocket/accept_js.go @@ -1,20 +1,20 @@ -package websocket - -import ( - "errors" - "net/http" -) - -// AcceptOptions represents Accept's options. -type AcceptOptions struct { - Subprotocols []string - InsecureSkipVerify bool - OriginPatterns []string - CompressionMode CompressionMode - CompressionThreshold int -} - -// Accept is stubbed out for Wasm. -func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) { - return nil, errors.New("unimplemented") -} +package websocket + +import ( + "errors" + "net/http" +) + +// AcceptOptions represents Accept's options. +type AcceptOptions struct { + Subprotocols []string + InsecureSkipVerify bool + OriginPatterns []string + CompressionMode CompressionMode + CompressionThreshold int +} + +// Accept is stubbed out for Wasm. +func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) { + return nil, errors.New("unimplemented") +} diff --git a/vendor/nhooyr.io/websocket/close.go b/vendor/nhooyr.io/websocket/close.go index 7cbc19e9de..6ccb6ef959 100644 --- a/vendor/nhooyr.io/websocket/close.go +++ b/vendor/nhooyr.io/websocket/close.go @@ -1,76 +1,76 @@ -package websocket - -import ( - "errors" - "fmt" -) - -// StatusCode represents a WebSocket status code. -// https://tools.ietf.org/html/rfc6455#section-7.4 -type StatusCode int - -// https://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number -// -// These are only the status codes defined by the protocol. -// -// You can define custom codes in the 3000-4999 range. -// The 3000-3999 range is reserved for use by libraries, frameworks and applications. -// The 4000-4999 range is reserved for private use. 
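The status-code constants that follow pair with the `CloseError` and `CloseStatus` helpers further down this file. A caller-side sketch of how they are typically inspected after a failed read; the function name and log messages are invented for illustration:

```go
package example

import (
	"context"
	"log"

	"nhooyr.io/websocket"
)

// classifyRead shows the intended use of CloseStatus once a read fails.
func classifyRead(ctx context.Context, c *websocket.Conn) {
	_, _, err := c.Read(ctx)
	if err == nil {
		return
	}
	switch websocket.CloseStatus(err) {
	case websocket.StatusNormalClosure, websocket.StatusGoingAway:
		log.Println("peer closed cleanly")
	case -1:
		// Not a CloseError at all, e.g. a plain network failure.
		log.Printf("read failed without a close frame: %v", err)
	default:
		log.Printf("peer closed with: %v", err)
	}
}
```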
-const ( - StatusNormalClosure StatusCode = 1000 - StatusGoingAway StatusCode = 1001 - StatusProtocolError StatusCode = 1002 - StatusUnsupportedData StatusCode = 1003 - - // 1004 is reserved and so unexported. - statusReserved StatusCode = 1004 - - // StatusNoStatusRcvd cannot be sent in a close message. - // It is reserved for when a close message is received without - // a status code. - StatusNoStatusRcvd StatusCode = 1005 - - // StatusAbnormalClosure is exported for use only with Wasm. - // In non Wasm Go, the returned error will indicate whether the - // connection was closed abnormally. - StatusAbnormalClosure StatusCode = 1006 - - StatusInvalidFramePayloadData StatusCode = 1007 - StatusPolicyViolation StatusCode = 1008 - StatusMessageTooBig StatusCode = 1009 - StatusMandatoryExtension StatusCode = 1010 - StatusInternalError StatusCode = 1011 - StatusServiceRestart StatusCode = 1012 - StatusTryAgainLater StatusCode = 1013 - StatusBadGateway StatusCode = 1014 - - // StatusTLSHandshake is only exported for use with Wasm. - // In non Wasm Go, the returned error will indicate whether there was - // a TLS handshake failure. - StatusTLSHandshake StatusCode = 1015 -) - -// CloseError is returned when the connection is closed with a status and reason. -// -// Use Go 1.13's errors.As to check for this error. -// Also see the CloseStatus helper. -type CloseError struct { - Code StatusCode - Reason string -} - -func (ce CloseError) Error() string { - return fmt.Sprintf("status = %v and reason = %q", ce.Code, ce.Reason) -} - -// CloseStatus is a convenience wrapper around Go 1.13's errors.As to grab -// the status code from a CloseError. -// -// -1 will be returned if the passed error is nil or not a CloseError. -func CloseStatus(err error) StatusCode { - var ce CloseError - if errors.As(err, &ce) { - return ce.Code - } - return -1 -} +package websocket + +import ( + "errors" + "fmt" +) + +// StatusCode represents a WebSocket status code. +// https://tools.ietf.org/html/rfc6455#section-7.4 +type StatusCode int + +// https://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number +// +// These are only the status codes defined by the protocol. +// +// You can define custom codes in the 3000-4999 range. +// The 3000-3999 range is reserved for use by libraries, frameworks and applications. +// The 4000-4999 range is reserved for private use. +const ( + StatusNormalClosure StatusCode = 1000 + StatusGoingAway StatusCode = 1001 + StatusProtocolError StatusCode = 1002 + StatusUnsupportedData StatusCode = 1003 + + // 1004 is reserved and so unexported. + statusReserved StatusCode = 1004 + + // StatusNoStatusRcvd cannot be sent in a close message. + // It is reserved for when a close message is received without + // a status code. + StatusNoStatusRcvd StatusCode = 1005 + + // StatusAbnormalClosure is exported for use only with Wasm. + // In non Wasm Go, the returned error will indicate whether the + // connection was closed abnormally. + StatusAbnormalClosure StatusCode = 1006 + + StatusInvalidFramePayloadData StatusCode = 1007 + StatusPolicyViolation StatusCode = 1008 + StatusMessageTooBig StatusCode = 1009 + StatusMandatoryExtension StatusCode = 1010 + StatusInternalError StatusCode = 1011 + StatusServiceRestart StatusCode = 1012 + StatusTryAgainLater StatusCode = 1013 + StatusBadGateway StatusCode = 1014 + + // StatusTLSHandshake is only exported for use with Wasm. + // In non Wasm Go, the returned error will indicate whether there was + // a TLS handshake failure. 
+ StatusTLSHandshake StatusCode = 1015 +) + +// CloseError is returned when the connection is closed with a status and reason. +// +// Use Go 1.13's errors.As to check for this error. +// Also see the CloseStatus helper. +type CloseError struct { + Code StatusCode + Reason string +} + +func (ce CloseError) Error() string { + return fmt.Sprintf("status = %v and reason = %q", ce.Code, ce.Reason) +} + +// CloseStatus is a convenience wrapper around Go 1.13's errors.As to grab +// the status code from a CloseError. +// +// -1 will be returned if the passed error is nil or not a CloseError. +func CloseStatus(err error) StatusCode { + var ce CloseError + if errors.As(err, &ce) { + return ce.Code + } + return -1 +} diff --git a/vendor/nhooyr.io/websocket/close_notjs.go b/vendor/nhooyr.io/websocket/close_notjs.go index 4251311d2e..9cbb7f0952 100644 --- a/vendor/nhooyr.io/websocket/close_notjs.go +++ b/vendor/nhooyr.io/websocket/close_notjs.go @@ -1,211 +1,211 @@ -// +build !js - -package websocket - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "log" - "time" - - "nhooyr.io/websocket/internal/errd" -) - -// Close performs the WebSocket close handshake with the given status code and reason. -// -// It will write a WebSocket close frame with a timeout of 5s and then wait 5s for -// the peer to send a close frame. -// All data messages received from the peer during the close handshake will be discarded. -// -// The connection can only be closed once. Additional calls to Close -// are no-ops. -// -// The maximum length of reason must be 125 bytes. Avoid -// sending a dynamic reason. -// -// Close will unblock all goroutines interacting with the connection once -// complete. -func (c *Conn) Close(code StatusCode, reason string) error { - return c.closeHandshake(code, reason) -} - -func (c *Conn) closeHandshake(code StatusCode, reason string) (err error) { - defer errd.Wrap(&err, "failed to close WebSocket") - - writeErr := c.writeClose(code, reason) - closeHandshakeErr := c.waitCloseHandshake() - - if writeErr != nil { - return writeErr - } - - if CloseStatus(closeHandshakeErr) == -1 { - return closeHandshakeErr - } - - return nil -} - -var errAlreadyWroteClose = errors.New("already wrote close") - -func (c *Conn) writeClose(code StatusCode, reason string) error { - c.closeMu.Lock() - wroteClose := c.wroteClose - c.wroteClose = true - c.closeMu.Unlock() - if wroteClose { - return errAlreadyWroteClose - } - - ce := CloseError{ - Code: code, - Reason: reason, - } - - var p []byte - var marshalErr error - if ce.Code != StatusNoStatusRcvd { - p, marshalErr = ce.bytes() - if marshalErr != nil { - log.Printf("websocket: %v", marshalErr) - } - } - - writeErr := c.writeControl(context.Background(), opClose, p) - if CloseStatus(writeErr) != -1 { - // Not a real error if it's due to a close frame being received. - writeErr = nil - } - - // We do this after in case there was an error writing the close frame. 
- c.setCloseErr(fmt.Errorf("sent close frame: %w", ce)) - - if marshalErr != nil { - return marshalErr - } - return writeErr -} - -func (c *Conn) waitCloseHandshake() error { - defer c.close(nil) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - - err := c.readMu.lock(ctx) - if err != nil { - return err - } - defer c.readMu.unlock() - - if c.readCloseFrameErr != nil { - return c.readCloseFrameErr - } - - for { - h, err := c.readLoop(ctx) - if err != nil { - return err - } - - for i := int64(0); i < h.payloadLength; i++ { - _, err := c.br.ReadByte() - if err != nil { - return err - } - } - } -} - -func parseClosePayload(p []byte) (CloseError, error) { - if len(p) == 0 { - return CloseError{ - Code: StatusNoStatusRcvd, - }, nil - } - - if len(p) < 2 { - return CloseError{}, fmt.Errorf("close payload %q too small, cannot even contain the 2 byte status code", p) - } - - ce := CloseError{ - Code: StatusCode(binary.BigEndian.Uint16(p)), - Reason: string(p[2:]), - } - - if !validWireCloseCode(ce.Code) { - return CloseError{}, fmt.Errorf("invalid status code %v", ce.Code) - } - - return ce, nil -} - -// See http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number -// and https://tools.ietf.org/html/rfc6455#section-7.4.1 -func validWireCloseCode(code StatusCode) bool { - switch code { - case statusReserved, StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake: - return false - } - - if code >= StatusNormalClosure && code <= StatusBadGateway { - return true - } - if code >= 3000 && code <= 4999 { - return true - } - - return false -} - -func (ce CloseError) bytes() ([]byte, error) { - p, err := ce.bytesErr() - if err != nil { - err = fmt.Errorf("failed to marshal close frame: %w", err) - ce = CloseError{ - Code: StatusInternalError, - } - p, _ = ce.bytesErr() - } - return p, err -} - -const maxCloseReason = maxControlPayload - 2 - -func (ce CloseError) bytesErr() ([]byte, error) { - if len(ce.Reason) > maxCloseReason { - return nil, fmt.Errorf("reason string max is %v but got %q with length %v", maxCloseReason, ce.Reason, len(ce.Reason)) - } - - if !validWireCloseCode(ce.Code) { - return nil, fmt.Errorf("status code %v cannot be set", ce.Code) - } - - buf := make([]byte, 2+len(ce.Reason)) - binary.BigEndian.PutUint16(buf, uint16(ce.Code)) - copy(buf[2:], ce.Reason) - return buf, nil -} - -func (c *Conn) setCloseErr(err error) { - c.closeMu.Lock() - c.setCloseErrLocked(err) - c.closeMu.Unlock() -} - -func (c *Conn) setCloseErrLocked(err error) { - if c.closeErr == nil { - c.closeErr = fmt.Errorf("WebSocket closed: %w", err) - } -} - -func (c *Conn) isClosed() bool { - select { - case <-c.closed: - return true - default: - return false - } -} +// +build !js + +package websocket + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "log" + "time" + + "nhooyr.io/websocket/internal/errd" +) + +// Close performs the WebSocket close handshake with the given status code and reason. +// +// It will write a WebSocket close frame with a timeout of 5s and then wait 5s for +// the peer to send a close frame. +// All data messages received from the peer during the close handshake will be discarded. +// +// The connection can only be closed once. Additional calls to Close +// are no-ops. +// +// The maximum length of reason must be 125 bytes. Avoid +// sending a dynamic reason. +// +// Close will unblock all goroutines interacting with the connection once +// complete. 
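Given the `Close` contract documented above (write a close frame, then wait up to 5s for the peer's), a small caller-side sketch; the helper name and logging are invented for illustration:

```go
package example

import (
	"log"

	"nhooyr.io/websocket"
)

// shutdown runs the full close handshake described above and logs,
// rather than propagates, the outcome.
func shutdown(c *websocket.Conn) {
	// Keep the reason static and at or under 125 bytes.
	if err := c.Close(websocket.StatusNormalClosure, ""); err != nil {
		log.Printf("close handshake: %v", err)
	}
}
```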
+func (c *Conn) Close(code StatusCode, reason string) error { + return c.closeHandshake(code, reason) +} + +func (c *Conn) closeHandshake(code StatusCode, reason string) (err error) { + defer errd.Wrap(&err, "failed to close WebSocket") + + writeErr := c.writeClose(code, reason) + closeHandshakeErr := c.waitCloseHandshake() + + if writeErr != nil { + return writeErr + } + + if CloseStatus(closeHandshakeErr) == -1 { + return closeHandshakeErr + } + + return nil +} + +var errAlreadyWroteClose = errors.New("already wrote close") + +func (c *Conn) writeClose(code StatusCode, reason string) error { + c.closeMu.Lock() + wroteClose := c.wroteClose + c.wroteClose = true + c.closeMu.Unlock() + if wroteClose { + return errAlreadyWroteClose + } + + ce := CloseError{ + Code: code, + Reason: reason, + } + + var p []byte + var marshalErr error + if ce.Code != StatusNoStatusRcvd { + p, marshalErr = ce.bytes() + if marshalErr != nil { + log.Printf("websocket: %v", marshalErr) + } + } + + writeErr := c.writeControl(context.Background(), opClose, p) + if CloseStatus(writeErr) != -1 { + // Not a real error if it's due to a close frame being received. + writeErr = nil + } + + // We do this after in case there was an error writing the close frame. + c.setCloseErr(fmt.Errorf("sent close frame: %w", ce)) + + if marshalErr != nil { + return marshalErr + } + return writeErr +} + +func (c *Conn) waitCloseHandshake() error { + defer c.close(nil) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + + err := c.readMu.lock(ctx) + if err != nil { + return err + } + defer c.readMu.unlock() + + if c.readCloseFrameErr != nil { + return c.readCloseFrameErr + } + + for { + h, err := c.readLoop(ctx) + if err != nil { + return err + } + + for i := int64(0); i < h.payloadLength; i++ { + _, err := c.br.ReadByte() + if err != nil { + return err + } + } + } +} + +func parseClosePayload(p []byte) (CloseError, error) { + if len(p) == 0 { + return CloseError{ + Code: StatusNoStatusRcvd, + }, nil + } + + if len(p) < 2 { + return CloseError{}, fmt.Errorf("close payload %q too small, cannot even contain the 2 byte status code", p) + } + + ce := CloseError{ + Code: StatusCode(binary.BigEndian.Uint16(p)), + Reason: string(p[2:]), + } + + if !validWireCloseCode(ce.Code) { + return CloseError{}, fmt.Errorf("invalid status code %v", ce.Code) + } + + return ce, nil +} + +// See http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number +// and https://tools.ietf.org/html/rfc6455#section-7.4.1 +func validWireCloseCode(code StatusCode) bool { + switch code { + case statusReserved, StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake: + return false + } + + if code >= StatusNormalClosure && code <= StatusBadGateway { + return true + } + if code >= 3000 && code <= 4999 { + return true + } + + return false +} + +func (ce CloseError) bytes() ([]byte, error) { + p, err := ce.bytesErr() + if err != nil { + err = fmt.Errorf("failed to marshal close frame: %w", err) + ce = CloseError{ + Code: StatusInternalError, + } + p, _ = ce.bytesErr() + } + return p, err +} + +const maxCloseReason = maxControlPayload - 2 + +func (ce CloseError) bytesErr() ([]byte, error) { + if len(ce.Reason) > maxCloseReason { + return nil, fmt.Errorf("reason string max is %v but got %q with length %v", maxCloseReason, ce.Reason, len(ce.Reason)) + } + + if !validWireCloseCode(ce.Code) { + return nil, fmt.Errorf("status code %v cannot be set", ce.Code) + } + + buf := make([]byte, 2+len(ce.Reason)) + 
binary.BigEndian.PutUint16(buf, uint16(ce.Code)) + copy(buf[2:], ce.Reason) + return buf, nil +} + +func (c *Conn) setCloseErr(err error) { + c.closeMu.Lock() + c.setCloseErrLocked(err) + c.closeMu.Unlock() +} + +func (c *Conn) setCloseErrLocked(err error) { + if c.closeErr == nil { + c.closeErr = fmt.Errorf("WebSocket closed: %w", err) + } +} + +func (c *Conn) isClosed() bool { + select { + case <-c.closed: + return true + default: + return false + } +} diff --git a/vendor/nhooyr.io/websocket/compress.go b/vendor/nhooyr.io/websocket/compress.go index 80b46d1c1d..81551eb3f9 100644 --- a/vendor/nhooyr.io/websocket/compress.go +++ b/vendor/nhooyr.io/websocket/compress.go @@ -1,39 +1,39 @@ -package websocket - -// CompressionMode represents the modes available to the deflate extension. -// See https://tools.ietf.org/html/rfc7692 -// -// A compatibility layer is implemented for the older deflate-frame extension used -// by safari. See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06 -// It will work the same in every way except that we cannot signal to the peer we -// want to use no context takeover on our side, we can only signal that they should. -// It is however currently disabled due to Safari bugs. See https://github.com/nhooyr/websocket/issues/218 -type CompressionMode int - -const ( - // CompressionNoContextTakeover grabs a new flate.Reader and flate.Writer as needed - // for every message. This applies to both server and client side. - // - // This means less efficient compression as the sliding window from previous messages - // will not be used but the memory overhead will be lower if the connections - // are long lived and seldom used. - // - // The message will only be compressed if greater than 512 bytes. - CompressionNoContextTakeover CompressionMode = iota - - // CompressionContextTakeover uses a flate.Reader and flate.Writer per connection. - // This enables reusing the sliding window from previous messages. - // As most WebSocket protocols are repetitive, this can be very efficient. - // It carries an overhead of 8 kB for every connection compared to CompressionNoContextTakeover. - // - // If the peer negotiates NoContextTakeover on the client or server side, it will be - // used instead as this is required by the RFC. - CompressionContextTakeover - - // CompressionDisabled disables the deflate extension. - // - // Use this if you are using a predominantly binary protocol with very - // little duplication in between messages or CPU and memory are more - // important than bandwidth. - CompressionDisabled -) +package websocket + +// CompressionMode represents the modes available to the deflate extension. +// See https://tools.ietf.org/html/rfc7692 +// +// A compatibility layer is implemented for the older deflate-frame extension used +// by safari. See https://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate-06 +// It will work the same in every way except that we cannot signal to the peer we +// want to use no context takeover on our side, we can only signal that they should. +// It is however currently disabled due to Safari bugs. See https://github.com/nhooyr/websocket/issues/218 +type CompressionMode int + +const ( + // CompressionNoContextTakeover grabs a new flate.Reader and flate.Writer as needed + // for every message. This applies to both server and client side. 
+ // + // This means less efficient compression as the sliding window from previous messages + // will not be used but the memory overhead will be lower if the connections + // are long lived and seldom used. + // + // The message will only be compressed if greater than 512 bytes. + CompressionNoContextTakeover CompressionMode = iota + + // CompressionContextTakeover uses a flate.Reader and flate.Writer per connection. + // This enables reusing the sliding window from previous messages. + // As most WebSocket protocols are repetitive, this can be very efficient. + // It carries an overhead of 8 kB for every connection compared to CompressionNoContextTakeover. + // + // If the peer negotiates NoContextTakeover on the client or server side, it will be + // used instead as this is required by the RFC. + CompressionContextTakeover + + // CompressionDisabled disables the deflate extension. + // + // Use this if you are using a predominantly binary protocol with very + // little duplication in between messages or CPU and memory are more + // important than bandwidth. + CompressionDisabled +) diff --git a/vendor/nhooyr.io/websocket/compress_notjs.go b/vendor/nhooyr.io/websocket/compress_notjs.go index 809a272c3d..e5bab39817 100644 --- a/vendor/nhooyr.io/websocket/compress_notjs.go +++ b/vendor/nhooyr.io/websocket/compress_notjs.go @@ -1,181 +1,181 @@ -// +build !js - -package websocket - -import ( - "io" - "net/http" - "sync" - - "github.com/klauspost/compress/flate" -) - -func (m CompressionMode) opts() *compressionOptions { - return &compressionOptions{ - clientNoContextTakeover: m == CompressionNoContextTakeover, - serverNoContextTakeover: m == CompressionNoContextTakeover, - } -} - -type compressionOptions struct { - clientNoContextTakeover bool - serverNoContextTakeover bool -} - -func (copts *compressionOptions) setHeader(h http.Header) { - s := "permessage-deflate" - if copts.clientNoContextTakeover { - s += "; client_no_context_takeover" - } - if copts.serverNoContextTakeover { - s += "; server_no_context_takeover" - } - h.Set("Sec-WebSocket-Extensions", s) -} - -// These bytes are required to get flate.Reader to return. -// They are removed when sending to avoid the overhead as -// WebSocket framing tell's when the message has ended but then -// we need to add them back otherwise flate.Reader keeps -// trying to return more bytes. -const deflateMessageTail = "\x00\x00\xff\xff" - -type trimLastFourBytesWriter struct { - w io.Writer - tail []byte -} - -func (tw *trimLastFourBytesWriter) reset() { - if tw != nil && tw.tail != nil { - tw.tail = tw.tail[:0] - } -} - -func (tw *trimLastFourBytesWriter) Write(p []byte) (int, error) { - if tw.tail == nil { - tw.tail = make([]byte, 0, 4) - } - - extra := len(tw.tail) + len(p) - 4 - - if extra <= 0 { - tw.tail = append(tw.tail, p...) - return len(p), nil - } - - // Now we need to write as many extra bytes as we can from the previous tail. - if extra > len(tw.tail) { - extra = len(tw.tail) - } - if extra > 0 { - _, err := tw.w.Write(tw.tail[:extra]) - if err != nil { - return 0, err - } - - // Shift remaining bytes in tail over. - n := copy(tw.tail, tw.tail[extra:]) - tw.tail = tw.tail[:n] - } - - // If p is less than or equal to 4 bytes, - // all of it is is part of the tail. - if len(p) <= 4 { - tw.tail = append(tw.tail, p...) - return len(p), nil - } - - // Otherwise, only the last 4 bytes are. - tw.tail = append(tw.tail, p[len(p)-4:]...) 
- - p = p[:len(p)-4] - n, err := tw.w.Write(p) - return n + 4, err -} - -var flateReaderPool sync.Pool - -func getFlateReader(r io.Reader, dict []byte) io.Reader { - fr, ok := flateReaderPool.Get().(io.Reader) - if !ok { - return flate.NewReaderDict(r, dict) - } - fr.(flate.Resetter).Reset(r, dict) - return fr -} - -func putFlateReader(fr io.Reader) { - flateReaderPool.Put(fr) -} - -type slidingWindow struct { - buf []byte -} - -var swPoolMu sync.RWMutex -var swPool = map[int]*sync.Pool{} - -func slidingWindowPool(n int) *sync.Pool { - swPoolMu.RLock() - p, ok := swPool[n] - swPoolMu.RUnlock() - if ok { - return p - } - - p = &sync.Pool{} - - swPoolMu.Lock() - swPool[n] = p - swPoolMu.Unlock() - - return p -} - -func (sw *slidingWindow) init(n int) { - if sw.buf != nil { - return - } - - if n == 0 { - n = 32768 - } - - p := slidingWindowPool(n) - buf, ok := p.Get().([]byte) - if ok { - sw.buf = buf[:0] - } else { - sw.buf = make([]byte, 0, n) - } -} - -func (sw *slidingWindow) close() { - if sw.buf == nil { - return - } - - swPoolMu.Lock() - swPool[cap(sw.buf)].Put(sw.buf) - swPoolMu.Unlock() - sw.buf = nil -} - -func (sw *slidingWindow) write(p []byte) { - if len(p) >= cap(sw.buf) { - sw.buf = sw.buf[:cap(sw.buf)] - p = p[len(p)-cap(sw.buf):] - copy(sw.buf, p) - return - } - - left := cap(sw.buf) - len(sw.buf) - if left < len(p) { - // We need to shift spaceNeeded bytes from the end to make room for p at the end. - spaceNeeded := len(p) - left - copy(sw.buf, sw.buf[spaceNeeded:]) - sw.buf = sw.buf[:len(sw.buf)-spaceNeeded] - } - - sw.buf = append(sw.buf, p...) -} +// +build !js + +package websocket + +import ( + "io" + "net/http" + "sync" + + "github.com/klauspost/compress/flate" +) + +func (m CompressionMode) opts() *compressionOptions { + return &compressionOptions{ + clientNoContextTakeover: m == CompressionNoContextTakeover, + serverNoContextTakeover: m == CompressionNoContextTakeover, + } +} + +type compressionOptions struct { + clientNoContextTakeover bool + serverNoContextTakeover bool +} + +func (copts *compressionOptions) setHeader(h http.Header) { + s := "permessage-deflate" + if copts.clientNoContextTakeover { + s += "; client_no_context_takeover" + } + if copts.serverNoContextTakeover { + s += "; server_no_context_takeover" + } + h.Set("Sec-WebSocket-Extensions", s) +} + +// These bytes are required to get flate.Reader to return. +// They are removed when sending to avoid the overhead as +// WebSocket framing tell's when the message has ended but then +// we need to add them back otherwise flate.Reader keeps +// trying to return more bytes. +const deflateMessageTail = "\x00\x00\xff\xff" + +type trimLastFourBytesWriter struct { + w io.Writer + tail []byte +} + +func (tw *trimLastFourBytesWriter) reset() { + if tw != nil && tw.tail != nil { + tw.tail = tw.tail[:0] + } +} + +func (tw *trimLastFourBytesWriter) Write(p []byte) (int, error) { + if tw.tail == nil { + tw.tail = make([]byte, 0, 4) + } + + extra := len(tw.tail) + len(p) - 4 + + if extra <= 0 { + tw.tail = append(tw.tail, p...) + return len(p), nil + } + + // Now we need to write as many extra bytes as we can from the previous tail. + if extra > len(tw.tail) { + extra = len(tw.tail) + } + if extra > 0 { + _, err := tw.w.Write(tw.tail[:extra]) + if err != nil { + return 0, err + } + + // Shift remaining bytes in tail over. + n := copy(tw.tail, tw.tail[extra:]) + tw.tail = tw.tail[:n] + } + + // If p is less than or equal to 4 bytes, + // all of it is is part of the tail. 
+ if len(p) <= 4 { + tw.tail = append(tw.tail, p...) + return len(p), nil + } + + // Otherwise, only the last 4 bytes are. + tw.tail = append(tw.tail, p[len(p)-4:]...) + + p = p[:len(p)-4] + n, err := tw.w.Write(p) + return n + 4, err +} + +var flateReaderPool sync.Pool + +func getFlateReader(r io.Reader, dict []byte) io.Reader { + fr, ok := flateReaderPool.Get().(io.Reader) + if !ok { + return flate.NewReaderDict(r, dict) + } + fr.(flate.Resetter).Reset(r, dict) + return fr +} + +func putFlateReader(fr io.Reader) { + flateReaderPool.Put(fr) +} + +type slidingWindow struct { + buf []byte +} + +var swPoolMu sync.RWMutex +var swPool = map[int]*sync.Pool{} + +func slidingWindowPool(n int) *sync.Pool { + swPoolMu.RLock() + p, ok := swPool[n] + swPoolMu.RUnlock() + if ok { + return p + } + + p = &sync.Pool{} + + swPoolMu.Lock() + swPool[n] = p + swPoolMu.Unlock() + + return p +} + +func (sw *slidingWindow) init(n int) { + if sw.buf != nil { + return + } + + if n == 0 { + n = 32768 + } + + p := slidingWindowPool(n) + buf, ok := p.Get().([]byte) + if ok { + sw.buf = buf[:0] + } else { + sw.buf = make([]byte, 0, n) + } +} + +func (sw *slidingWindow) close() { + if sw.buf == nil { + return + } + + swPoolMu.Lock() + swPool[cap(sw.buf)].Put(sw.buf) + swPoolMu.Unlock() + sw.buf = nil +} + +func (sw *slidingWindow) write(p []byte) { + if len(p) >= cap(sw.buf) { + sw.buf = sw.buf[:cap(sw.buf)] + p = p[len(p)-cap(sw.buf):] + copy(sw.buf, p) + return + } + + left := cap(sw.buf) - len(sw.buf) + if left < len(p) { + // We need to shift spaceNeeded bytes from the end to make room for p at the end. + spaceNeeded := len(p) - left + copy(sw.buf, sw.buf[spaceNeeded:]) + sw.buf = sw.buf[:len(sw.buf)-spaceNeeded] + } + + sw.buf = append(sw.buf, p...) +} diff --git a/vendor/nhooyr.io/websocket/conn.go b/vendor/nhooyr.io/websocket/conn.go index a41808be3f..5b55fcf707 100644 --- a/vendor/nhooyr.io/websocket/conn.go +++ b/vendor/nhooyr.io/websocket/conn.go @@ -1,13 +1,13 @@ -package websocket - -// MessageType represents the type of a WebSocket message. -// See https://tools.ietf.org/html/rfc6455#section-5.6 -type MessageType int - -// MessageType constants. -const ( - // MessageText is for UTF-8 encoded text messages like JSON. - MessageText MessageType = iota + 1 - // MessageBinary is for binary messages like protobufs. - MessageBinary -) +package websocket + +// MessageType represents the type of a WebSocket message. +// See https://tools.ietf.org/html/rfc6455#section-5.6 +type MessageType int + +// MessageType constants. +const ( + // MessageText is for UTF-8 encoded text messages like JSON. + MessageText MessageType = iota + 1 + // MessageBinary is for binary messages like protobufs. + MessageBinary +) diff --git a/vendor/nhooyr.io/websocket/conn_notjs.go b/vendor/nhooyr.io/websocket/conn_notjs.go index 0c85ab7711..86d19901ef 100644 --- a/vendor/nhooyr.io/websocket/conn_notjs.go +++ b/vendor/nhooyr.io/websocket/conn_notjs.go @@ -1,265 +1,265 @@ -// +build !js - -package websocket - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "runtime" - "strconv" - "sync" - "sync/atomic" -) - -// Conn represents a WebSocket connection. -// All methods may be called concurrently except for Reader and Read. -// -// You must always read from the connection. Otherwise control -// frames will not be handled. See Reader and CloseRead. -// -// Be sure to call Close on the connection when you -// are finished with it to release associated resources. 
-// -// On any error from any method, the connection is closed -// with an appropriate reason. -type Conn struct { - subprotocol string - rwc io.ReadWriteCloser - client bool - copts *compressionOptions - flateThreshold int - br *bufio.Reader - bw *bufio.Writer - - readTimeout chan context.Context - writeTimeout chan context.Context - - // Read state. - readMu *mu - readHeaderBuf [8]byte - readControlBuf [maxControlPayload]byte - msgReader *msgReader - readCloseFrameErr error - - // Write state. - msgWriterState *msgWriterState - writeFrameMu *mu - writeBuf []byte - writeHeaderBuf [8]byte - writeHeader header - - closed chan struct{} - closeMu sync.Mutex - closeErr error - wroteClose bool - - pingCounter int32 - activePingsMu sync.Mutex - activePings map[string]chan<- struct{} -} - -type connConfig struct { - subprotocol string - rwc io.ReadWriteCloser - client bool - copts *compressionOptions - flateThreshold int - - br *bufio.Reader - bw *bufio.Writer -} - -func newConn(cfg connConfig) *Conn { - c := &Conn{ - subprotocol: cfg.subprotocol, - rwc: cfg.rwc, - client: cfg.client, - copts: cfg.copts, - flateThreshold: cfg.flateThreshold, - - br: cfg.br, - bw: cfg.bw, - - readTimeout: make(chan context.Context), - writeTimeout: make(chan context.Context), - - closed: make(chan struct{}), - activePings: make(map[string]chan<- struct{}), - } - - c.readMu = newMu(c) - c.writeFrameMu = newMu(c) - - c.msgReader = newMsgReader(c) - - c.msgWriterState = newMsgWriterState(c) - if c.client { - c.writeBuf = extractBufioWriterBuf(c.bw, c.rwc) - } - - if c.flate() && c.flateThreshold == 0 { - c.flateThreshold = 128 - if !c.msgWriterState.flateContextTakeover() { - c.flateThreshold = 512 - } - } - - runtime.SetFinalizer(c, func(c *Conn) { - c.close(errors.New("connection garbage collected")) - }) - - go c.timeoutLoop() - - return c -} - -// Subprotocol returns the negotiated subprotocol. -// An empty string means the default protocol. -func (c *Conn) Subprotocol() string { - return c.subprotocol -} - -func (c *Conn) close(err error) { - c.closeMu.Lock() - defer c.closeMu.Unlock() - - if c.isClosed() { - return - } - c.setCloseErrLocked(err) - close(c.closed) - runtime.SetFinalizer(c, nil) - - // Have to close after c.closed is closed to ensure any goroutine that wakes up - // from the connection being closed also sees that c.closed is closed and returns - // closeErr. - c.rwc.Close() - - go func() { - c.msgWriterState.close() - - c.msgReader.close() - }() -} - -func (c *Conn) timeoutLoop() { - readCtx := context.Background() - writeCtx := context.Background() - - for { - select { - case <-c.closed: - return - - case writeCtx = <-c.writeTimeout: - case readCtx = <-c.readTimeout: - - case <-readCtx.Done(): - c.setCloseErr(fmt.Errorf("read timed out: %w", readCtx.Err())) - go c.writeError(StatusPolicyViolation, errors.New("timed out")) - case <-writeCtx.Done(): - c.close(fmt.Errorf("write timed out: %w", writeCtx.Err())) - return - } - } -} - -func (c *Conn) flate() bool { - return c.copts != nil -} - -// Ping sends a ping to the peer and waits for a pong. -// Use this to measure latency or ensure the peer is responsive. -// Ping must be called concurrently with Reader as it does -// not read from the connection but instead waits for a Reader call -// to read the pong. -// -// TCP Keepalives should suffice for most use cases. 
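Because `Conn`'s documentation above requires the connection to always be read so control frames are handled, write-only callers normally lean on the library's `CloseRead` helper, which lives in the read path of this vendor tree outside this hunk (an assumption of this sketch). The `publish` name and channel plumbing are likewise invented:

```go
package example

import (
	"context"

	"nhooyr.io/websocket"
)

// publish is a write-only sketch. CloseRead keeps a background reader
// running so pings and close frames are still handled, and cancels the
// returned context once the peer closes or the connection fails.
func publish(ctx context.Context, c *websocket.Conn, events <-chan []byte) error {
	ctx = c.CloseRead(ctx)
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case ev := <-events:
			if err := c.Write(ctx, websocket.MessageText, ev); err != nil {
				return err
			}
		}
	}
}
```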
-func (c *Conn) Ping(ctx context.Context) error { - p := atomic.AddInt32(&c.pingCounter, 1) - - err := c.ping(ctx, strconv.Itoa(int(p))) - if err != nil { - return fmt.Errorf("failed to ping: %w", err) - } - return nil -} - -func (c *Conn) ping(ctx context.Context, p string) error { - pong := make(chan struct{}, 1) - - c.activePingsMu.Lock() - c.activePings[p] = pong - c.activePingsMu.Unlock() - - defer func() { - c.activePingsMu.Lock() - delete(c.activePings, p) - c.activePingsMu.Unlock() - }() - - err := c.writeControl(ctx, opPing, []byte(p)) - if err != nil { - return err - } - - select { - case <-c.closed: - return c.closeErr - case <-ctx.Done(): - err := fmt.Errorf("failed to wait for pong: %w", ctx.Err()) - c.close(err) - return err - case <-pong: - return nil - } -} - -type mu struct { - c *Conn - ch chan struct{} -} - -func newMu(c *Conn) *mu { - return &mu{ - c: c, - ch: make(chan struct{}, 1), - } -} - -func (m *mu) forceLock() { - m.ch <- struct{}{} -} - -func (m *mu) lock(ctx context.Context) error { - select { - case <-m.c.closed: - return m.c.closeErr - case <-ctx.Done(): - err := fmt.Errorf("failed to acquire lock: %w", ctx.Err()) - m.c.close(err) - return err - case m.ch <- struct{}{}: - // To make sure the connection is certainly alive. - // As it's possible the send on m.ch was selected - // over the receive on closed. - select { - case <-m.c.closed: - // Make sure to release. - m.unlock() - return m.c.closeErr - default: - } - return nil - } -} - -func (m *mu) unlock() { - select { - case <-m.ch: - default: - } -} +// +build !js + +package websocket + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "runtime" + "strconv" + "sync" + "sync/atomic" +) + +// Conn represents a WebSocket connection. +// All methods may be called concurrently except for Reader and Read. +// +// You must always read from the connection. Otherwise control +// frames will not be handled. See Reader and CloseRead. +// +// Be sure to call Close on the connection when you +// are finished with it to release associated resources. +// +// On any error from any method, the connection is closed +// with an appropriate reason. +type Conn struct { + subprotocol string + rwc io.ReadWriteCloser + client bool + copts *compressionOptions + flateThreshold int + br *bufio.Reader + bw *bufio.Writer + + readTimeout chan context.Context + writeTimeout chan context.Context + + // Read state. + readMu *mu + readHeaderBuf [8]byte + readControlBuf [maxControlPayload]byte + msgReader *msgReader + readCloseFrameErr error + + // Write state. 
+ msgWriterState *msgWriterState + writeFrameMu *mu + writeBuf []byte + writeHeaderBuf [8]byte + writeHeader header + + closed chan struct{} + closeMu sync.Mutex + closeErr error + wroteClose bool + + pingCounter int32 + activePingsMu sync.Mutex + activePings map[string]chan<- struct{} +} + +type connConfig struct { + subprotocol string + rwc io.ReadWriteCloser + client bool + copts *compressionOptions + flateThreshold int + + br *bufio.Reader + bw *bufio.Writer +} + +func newConn(cfg connConfig) *Conn { + c := &Conn{ + subprotocol: cfg.subprotocol, + rwc: cfg.rwc, + client: cfg.client, + copts: cfg.copts, + flateThreshold: cfg.flateThreshold, + + br: cfg.br, + bw: cfg.bw, + + readTimeout: make(chan context.Context), + writeTimeout: make(chan context.Context), + + closed: make(chan struct{}), + activePings: make(map[string]chan<- struct{}), + } + + c.readMu = newMu(c) + c.writeFrameMu = newMu(c) + + c.msgReader = newMsgReader(c) + + c.msgWriterState = newMsgWriterState(c) + if c.client { + c.writeBuf = extractBufioWriterBuf(c.bw, c.rwc) + } + + if c.flate() && c.flateThreshold == 0 { + c.flateThreshold = 128 + if !c.msgWriterState.flateContextTakeover() { + c.flateThreshold = 512 + } + } + + runtime.SetFinalizer(c, func(c *Conn) { + c.close(errors.New("connection garbage collected")) + }) + + go c.timeoutLoop() + + return c +} + +// Subprotocol returns the negotiated subprotocol. +// An empty string means the default protocol. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +func (c *Conn) close(err error) { + c.closeMu.Lock() + defer c.closeMu.Unlock() + + if c.isClosed() { + return + } + c.setCloseErrLocked(err) + close(c.closed) + runtime.SetFinalizer(c, nil) + + // Have to close after c.closed is closed to ensure any goroutine that wakes up + // from the connection being closed also sees that c.closed is closed and returns + // closeErr. + c.rwc.Close() + + go func() { + c.msgWriterState.close() + + c.msgReader.close() + }() +} + +func (c *Conn) timeoutLoop() { + readCtx := context.Background() + writeCtx := context.Background() + + for { + select { + case <-c.closed: + return + + case writeCtx = <-c.writeTimeout: + case readCtx = <-c.readTimeout: + + case <-readCtx.Done(): + c.setCloseErr(fmt.Errorf("read timed out: %w", readCtx.Err())) + go c.writeError(StatusPolicyViolation, errors.New("timed out")) + case <-writeCtx.Done(): + c.close(fmt.Errorf("write timed out: %w", writeCtx.Err())) + return + } + } +} + +func (c *Conn) flate() bool { + return c.copts != nil +} + +// Ping sends a ping to the peer and waits for a pong. +// Use this to measure latency or ensure the peer is responsive. +// Ping must be called concurrently with Reader as it does +// not read from the connection but instead waits for a Reader call +// to read the pong. +// +// TCP Keepalives should suffice for most use cases. 
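Since `Ping` above only completes when a concurrent reader sees the pong, a heartbeat loop is the usual shape. The 30s period and 5s timeout below are arbitrary choices for this sketch:

```go
package example

import (
	"context"
	"time"

	"nhooyr.io/websocket"
)

// heartbeat pings the peer on a fixed interval. Something else must be
// reading the connection concurrently (Reader or CloseRead), or the
// pong is never observed.
func heartbeat(ctx context.Context, c *websocket.Conn) {
	t := time.NewTicker(30 * time.Second)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			pingCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
			err := c.Ping(pingCtx)
			cancel()
			if err != nil {
				// Ping closes the connection on failure, so just stop.
				return
			}
		}
	}
}
```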
+func (c *Conn) Ping(ctx context.Context) error { + p := atomic.AddInt32(&c.pingCounter, 1) + + err := c.ping(ctx, strconv.Itoa(int(p))) + if err != nil { + return fmt.Errorf("failed to ping: %w", err) + } + return nil +} + +func (c *Conn) ping(ctx context.Context, p string) error { + pong := make(chan struct{}, 1) + + c.activePingsMu.Lock() + c.activePings[p] = pong + c.activePingsMu.Unlock() + + defer func() { + c.activePingsMu.Lock() + delete(c.activePings, p) + c.activePingsMu.Unlock() + }() + + err := c.writeControl(ctx, opPing, []byte(p)) + if err != nil { + return err + } + + select { + case <-c.closed: + return c.closeErr + case <-ctx.Done(): + err := fmt.Errorf("failed to wait for pong: %w", ctx.Err()) + c.close(err) + return err + case <-pong: + return nil + } +} + +type mu struct { + c *Conn + ch chan struct{} +} + +func newMu(c *Conn) *mu { + return &mu{ + c: c, + ch: make(chan struct{}, 1), + } +} + +func (m *mu) forceLock() { + m.ch <- struct{}{} +} + +func (m *mu) lock(ctx context.Context) error { + select { + case <-m.c.closed: + return m.c.closeErr + case <-ctx.Done(): + err := fmt.Errorf("failed to acquire lock: %w", ctx.Err()) + m.c.close(err) + return err + case m.ch <- struct{}{}: + // To make sure the connection is certainly alive. + // As it's possible the send on m.ch was selected + // over the receive on closed. + select { + case <-m.c.closed: + // Make sure to release. + m.unlock() + return m.c.closeErr + default: + } + return nil + } +} + +func (m *mu) unlock() { + select { + case <-m.ch: + default: + } +} diff --git a/vendor/nhooyr.io/websocket/dial.go b/vendor/nhooyr.io/websocket/dial.go index 7a7787ff71..bcce4e1774 100644 --- a/vendor/nhooyr.io/websocket/dial.go +++ b/vendor/nhooyr.io/websocket/dial.go @@ -1,292 +1,292 @@ -// +build !js - -package websocket - -import ( - "bufio" - "bytes" - "context" - "crypto/rand" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "nhooyr.io/websocket/internal/errd" -) - -// DialOptions represents Dial's options. -type DialOptions struct { - // HTTPClient is used for the connection. - // Its Transport must return writable bodies for WebSocket handshakes. - // http.Transport does beginning with Go 1.12. - HTTPClient *http.Client - - // HTTPHeader specifies the HTTP headers included in the handshake request. - HTTPHeader http.Header - - // Subprotocols lists the WebSocket subprotocols to negotiate with the server. - Subprotocols []string - - // CompressionMode controls the compression mode. - // Defaults to CompressionNoContextTakeover. - // - // See docs on CompressionMode for details. - CompressionMode CompressionMode - - // CompressionThreshold controls the minimum size of a message before compression is applied. - // - // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes - // for CompressionContextTakeover. - CompressionThreshold int -} - -// Dial performs a WebSocket handshake on url. -// -// The response is the WebSocket handshake response from the server. -// You never need to close resp.Body yourself. -// -// If an error occurs, the returned response may be non nil. -// However, you can only read the first 1024 bytes of the body. -// -// This function requires at least Go 1.12 as it uses a new feature -// in net/http to perform WebSocket handshakes. -// See docs on the HTTPClient option and https://github.com/golang/go/issues/26937#issuecomment-415855861 -// -// URLs with http/https schemes will work and are interpreted as ws/wss. 
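For the client side re-added below, a minimal `Dial` sketch. The endpoint URL is a placeholder, and the JSON payload is invented; `wsjson` is the helper subpackage this vendor tree's docs mention:

```go
package main

import (
	"context"
	"log"
	"time"

	"nhooyr.io/websocket"
	"nhooyr.io/websocket/wsjson"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Per the docs above, http/https URLs would also work, interpreted
	// as ws/wss.
	c, _, err := websocket.Dial(ctx, "ws://localhost:8080/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close(websocket.StatusInternalError, "unexpected exit")

	if err := wsjson.Write(ctx, c, map[string]string{"hello": "world"}); err != nil {
		log.Fatal(err)
	}

	c.Close(websocket.StatusNormalClosure, "")
}
```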
-func Dial(ctx context.Context, u string, opts *DialOptions) (*Conn, *http.Response, error) { - return dial(ctx, u, opts, nil) -} - -func dial(ctx context.Context, urls string, opts *DialOptions, rand io.Reader) (_ *Conn, _ *http.Response, err error) { - defer errd.Wrap(&err, "failed to WebSocket dial") - - if opts == nil { - opts = &DialOptions{} - } - - opts = &*opts - if opts.HTTPClient == nil { - opts.HTTPClient = http.DefaultClient - } else if opts.HTTPClient.Timeout > 0 { - var cancel context.CancelFunc - - ctx, cancel = context.WithTimeout(ctx, opts.HTTPClient.Timeout) - defer cancel() - - newClient := *opts.HTTPClient - newClient.Timeout = 0 - opts.HTTPClient = &newClient - } - - if opts.HTTPHeader == nil { - opts.HTTPHeader = http.Header{} - } - - secWebSocketKey, err := secWebSocketKey(rand) - if err != nil { - return nil, nil, fmt.Errorf("failed to generate Sec-WebSocket-Key: %w", err) - } - - var copts *compressionOptions - if opts.CompressionMode != CompressionDisabled { - copts = opts.CompressionMode.opts() - } - - resp, err := handshakeRequest(ctx, urls, opts, copts, secWebSocketKey) - if err != nil { - return nil, resp, err - } - respBody := resp.Body - resp.Body = nil - defer func() { - if err != nil { - // We read a bit of the body for easier debugging. - r := io.LimitReader(respBody, 1024) - - timer := time.AfterFunc(time.Second*3, func() { - respBody.Close() - }) - defer timer.Stop() - - b, _ := ioutil.ReadAll(r) - respBody.Close() - resp.Body = ioutil.NopCloser(bytes.NewReader(b)) - } - }() - - copts, err = verifyServerResponse(opts, copts, secWebSocketKey, resp) - if err != nil { - return nil, resp, err - } - - rwc, ok := respBody.(io.ReadWriteCloser) - if !ok { - return nil, resp, fmt.Errorf("response body is not a io.ReadWriteCloser: %T", respBody) - } - - return newConn(connConfig{ - subprotocol: resp.Header.Get("Sec-WebSocket-Protocol"), - rwc: rwc, - client: true, - copts: copts, - flateThreshold: opts.CompressionThreshold, - br: getBufioReader(rwc), - bw: getBufioWriter(rwc), - }), resp, nil -} - -func handshakeRequest(ctx context.Context, urls string, opts *DialOptions, copts *compressionOptions, secWebSocketKey string) (*http.Response, error) { - u, err := url.Parse(urls) - if err != nil { - return nil, fmt.Errorf("failed to parse url: %w", err) - } - - switch u.Scheme { - case "ws": - u.Scheme = "http" - case "wss": - u.Scheme = "https" - case "http", "https": - default: - return nil, fmt.Errorf("unexpected url scheme: %q", u.Scheme) - } - - req, _ := http.NewRequestWithContext(ctx, "GET", u.String(), nil) - req.Header = opts.HTTPHeader.Clone() - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "websocket") - req.Header.Set("Sec-WebSocket-Version", "13") - req.Header.Set("Sec-WebSocket-Key", secWebSocketKey) - if len(opts.Subprotocols) > 0 { - req.Header.Set("Sec-WebSocket-Protocol", strings.Join(opts.Subprotocols, ",")) - } - if copts != nil { - copts.setHeader(req.Header) - } - - resp, err := opts.HTTPClient.Do(req) - if err != nil { - return nil, fmt.Errorf("failed to send handshake request: %w", err) - } - return resp, nil -} - -func secWebSocketKey(rr io.Reader) (string, error) { - if rr == nil { - rr = rand.Reader - } - b := make([]byte, 16) - _, err := io.ReadFull(rr, b) - if err != nil { - return "", fmt.Errorf("failed to read random data from rand.Reader: %w", err) - } - return base64.StdEncoding.EncodeToString(b), nil -} - -func verifyServerResponse(opts *DialOptions, copts *compressionOptions, secWebSocketKey string, resp 
*http.Response) (*compressionOptions, error) { - if resp.StatusCode != http.StatusSwitchingProtocols { - return nil, fmt.Errorf("expected handshake response status code %v but got %v", http.StatusSwitchingProtocols, resp.StatusCode) - } - - if !headerContainsTokenIgnoreCase(resp.Header, "Connection", "Upgrade") { - return nil, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", resp.Header.Get("Connection")) - } - - if !headerContainsTokenIgnoreCase(resp.Header, "Upgrade", "WebSocket") { - return nil, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", resp.Header.Get("Upgrade")) - } - - if resp.Header.Get("Sec-WebSocket-Accept") != secWebSocketAccept(secWebSocketKey) { - return nil, fmt.Errorf("WebSocket protocol violation: invalid Sec-WebSocket-Accept %q, key %q", - resp.Header.Get("Sec-WebSocket-Accept"), - secWebSocketKey, - ) - } - - err := verifySubprotocol(opts.Subprotocols, resp) - if err != nil { - return nil, err - } - - return verifyServerExtensions(copts, resp.Header) -} - -func verifySubprotocol(subprotos []string, resp *http.Response) error { - proto := resp.Header.Get("Sec-WebSocket-Protocol") - if proto == "" { - return nil - } - - for _, sp2 := range subprotos { - if strings.EqualFold(sp2, proto) { - return nil - } - } - - return fmt.Errorf("WebSocket protocol violation: unexpected Sec-WebSocket-Protocol from server: %q", proto) -} - -func verifyServerExtensions(copts *compressionOptions, h http.Header) (*compressionOptions, error) { - exts := websocketExtensions(h) - if len(exts) == 0 { - return nil, nil - } - - ext := exts[0] - if ext.name != "permessage-deflate" || len(exts) > 1 || copts == nil { - return nil, fmt.Errorf("WebSocket protcol violation: unsupported extensions from server: %+v", exts[1:]) - } - - copts = &*copts - - for _, p := range ext.params { - switch p { - case "client_no_context_takeover": - copts.clientNoContextTakeover = true - continue - case "server_no_context_takeover": - copts.serverNoContextTakeover = true - continue - } - - return nil, fmt.Errorf("unsupported permessage-deflate parameter: %q", p) - } - - return copts, nil -} - -var bufioReaderPool sync.Pool - -func getBufioReader(r io.Reader) *bufio.Reader { - br, ok := bufioReaderPool.Get().(*bufio.Reader) - if !ok { - return bufio.NewReader(r) - } - br.Reset(r) - return br -} - -func putBufioReader(br *bufio.Reader) { - bufioReaderPool.Put(br) -} - -var bufioWriterPool sync.Pool - -func getBufioWriter(w io.Writer) *bufio.Writer { - bw, ok := bufioWriterPool.Get().(*bufio.Writer) - if !ok { - return bufio.NewWriter(w) - } - bw.Reset(w) - return bw -} - -func putBufioWriter(bw *bufio.Writer) { - bufioWriterPool.Put(bw) -} +// +build !js + +package websocket + +import ( + "bufio" + "bytes" + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "nhooyr.io/websocket/internal/errd" +) + +// DialOptions represents Dial's options. +type DialOptions struct { + // HTTPClient is used for the connection. + // Its Transport must return writable bodies for WebSocket handshakes. + // http.Transport does beginning with Go 1.12. + HTTPClient *http.Client + + // HTTPHeader specifies the HTTP headers included in the handshake request. + HTTPHeader http.Header + + // Subprotocols lists the WebSocket subprotocols to negotiate with the server. + Subprotocols []string + + // CompressionMode controls the compression mode. 
+ // Defaults to CompressionNoContextTakeover. + // + // See docs on CompressionMode for details. + CompressionMode CompressionMode + + // CompressionThreshold controls the minimum size of a message before compression is applied. + // + // Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes + // for CompressionContextTakeover. + CompressionThreshold int +} + +// Dial performs a WebSocket handshake on url. +// +// The response is the WebSocket handshake response from the server. +// You never need to close resp.Body yourself. +// +// If an error occurs, the returned response may be non nil. +// However, you can only read the first 1024 bytes of the body. +// +// This function requires at least Go 1.12 as it uses a new feature +// in net/http to perform WebSocket handshakes. +// See docs on the HTTPClient option and https://github.com/golang/go/issues/26937#issuecomment-415855861 +// +// URLs with http/https schemes will work and are interpreted as ws/wss. +func Dial(ctx context.Context, u string, opts *DialOptions) (*Conn, *http.Response, error) { + return dial(ctx, u, opts, nil) +} + +func dial(ctx context.Context, urls string, opts *DialOptions, rand io.Reader) (_ *Conn, _ *http.Response, err error) { + defer errd.Wrap(&err, "failed to WebSocket dial") + + if opts == nil { + opts = &DialOptions{} + } + + opts = &*opts + if opts.HTTPClient == nil { + opts.HTTPClient = http.DefaultClient + } else if opts.HTTPClient.Timeout > 0 { + var cancel context.CancelFunc + + ctx, cancel = context.WithTimeout(ctx, opts.HTTPClient.Timeout) + defer cancel() + + newClient := *opts.HTTPClient + newClient.Timeout = 0 + opts.HTTPClient = &newClient + } + + if opts.HTTPHeader == nil { + opts.HTTPHeader = http.Header{} + } + + secWebSocketKey, err := secWebSocketKey(rand) + if err != nil { + return nil, nil, fmt.Errorf("failed to generate Sec-WebSocket-Key: %w", err) + } + + var copts *compressionOptions + if opts.CompressionMode != CompressionDisabled { + copts = opts.CompressionMode.opts() + } + + resp, err := handshakeRequest(ctx, urls, opts, copts, secWebSocketKey) + if err != nil { + return nil, resp, err + } + respBody := resp.Body + resp.Body = nil + defer func() { + if err != nil { + // We read a bit of the body for easier debugging. 
+ r := io.LimitReader(respBody, 1024) + + timer := time.AfterFunc(time.Second*3, func() { + respBody.Close() + }) + defer timer.Stop() + + b, _ := ioutil.ReadAll(r) + respBody.Close() + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + }() + + copts, err = verifyServerResponse(opts, copts, secWebSocketKey, resp) + if err != nil { + return nil, resp, err + } + + rwc, ok := respBody.(io.ReadWriteCloser) + if !ok { + return nil, resp, fmt.Errorf("response body is not a io.ReadWriteCloser: %T", respBody) + } + + return newConn(connConfig{ + subprotocol: resp.Header.Get("Sec-WebSocket-Protocol"), + rwc: rwc, + client: true, + copts: copts, + flateThreshold: opts.CompressionThreshold, + br: getBufioReader(rwc), + bw: getBufioWriter(rwc), + }), resp, nil +} + +func handshakeRequest(ctx context.Context, urls string, opts *DialOptions, copts *compressionOptions, secWebSocketKey string) (*http.Response, error) { + u, err := url.Parse(urls) + if err != nil { + return nil, fmt.Errorf("failed to parse url: %w", err) + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + case "http", "https": + default: + return nil, fmt.Errorf("unexpected url scheme: %q", u.Scheme) + } + + req, _ := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + req.Header = opts.HTTPHeader.Clone() + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "websocket") + req.Header.Set("Sec-WebSocket-Version", "13") + req.Header.Set("Sec-WebSocket-Key", secWebSocketKey) + if len(opts.Subprotocols) > 0 { + req.Header.Set("Sec-WebSocket-Protocol", strings.Join(opts.Subprotocols, ",")) + } + if copts != nil { + copts.setHeader(req.Header) + } + + resp, err := opts.HTTPClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to send handshake request: %w", err) + } + return resp, nil +} + +func secWebSocketKey(rr io.Reader) (string, error) { + if rr == nil { + rr = rand.Reader + } + b := make([]byte, 16) + _, err := io.ReadFull(rr, b) + if err != nil { + return "", fmt.Errorf("failed to read random data from rand.Reader: %w", err) + } + return base64.StdEncoding.EncodeToString(b), nil +} + +func verifyServerResponse(opts *DialOptions, copts *compressionOptions, secWebSocketKey string, resp *http.Response) (*compressionOptions, error) { + if resp.StatusCode != http.StatusSwitchingProtocols { + return nil, fmt.Errorf("expected handshake response status code %v but got %v", http.StatusSwitchingProtocols, resp.StatusCode) + } + + if !headerContainsTokenIgnoreCase(resp.Header, "Connection", "Upgrade") { + return nil, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", resp.Header.Get("Connection")) + } + + if !headerContainsTokenIgnoreCase(resp.Header, "Upgrade", "WebSocket") { + return nil, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", resp.Header.Get("Upgrade")) + } + + if resp.Header.Get("Sec-WebSocket-Accept") != secWebSocketAccept(secWebSocketKey) { + return nil, fmt.Errorf("WebSocket protocol violation: invalid Sec-WebSocket-Accept %q, key %q", + resp.Header.Get("Sec-WebSocket-Accept"), + secWebSocketKey, + ) + } + + err := verifySubprotocol(opts.Subprotocols, resp) + if err != nil { + return nil, err + } + + return verifyServerExtensions(copts, resp.Header) +} + +func verifySubprotocol(subprotos []string, resp *http.Response) error { + proto := resp.Header.Get("Sec-WebSocket-Protocol") + if proto == "" { + return nil + } + + for _, sp2 := range subprotos { + if 
strings.EqualFold(sp2, proto) { + return nil + } + } + + return fmt.Errorf("WebSocket protocol violation: unexpected Sec-WebSocket-Protocol from server: %q", proto) +} + +func verifyServerExtensions(copts *compressionOptions, h http.Header) (*compressionOptions, error) { + exts := websocketExtensions(h) + if len(exts) == 0 { + return nil, nil + } + + ext := exts[0] + if ext.name != "permessage-deflate" || len(exts) > 1 || copts == nil { + return nil, fmt.Errorf("WebSocket protcol violation: unsupported extensions from server: %+v", exts[1:]) + } + + copts = &*copts + + for _, p := range ext.params { + switch p { + case "client_no_context_takeover": + copts.clientNoContextTakeover = true + continue + case "server_no_context_takeover": + copts.serverNoContextTakeover = true + continue + } + + return nil, fmt.Errorf("unsupported permessage-deflate parameter: %q", p) + } + + return copts, nil +} + +var bufioReaderPool sync.Pool + +func getBufioReader(r io.Reader) *bufio.Reader { + br, ok := bufioReaderPool.Get().(*bufio.Reader) + if !ok { + return bufio.NewReader(r) + } + br.Reset(r) + return br +} + +func putBufioReader(br *bufio.Reader) { + bufioReaderPool.Put(br) +} + +var bufioWriterPool sync.Pool + +func getBufioWriter(w io.Writer) *bufio.Writer { + bw, ok := bufioWriterPool.Get().(*bufio.Writer) + if !ok { + return bufio.NewWriter(w) + } + bw.Reset(w) + return bw +} + +func putBufioWriter(bw *bufio.Writer) { + bufioWriterPool.Put(bw) +} diff --git a/vendor/nhooyr.io/websocket/doc.go b/vendor/nhooyr.io/websocket/doc.go index efa920e3b6..1464712767 100644 --- a/vendor/nhooyr.io/websocket/doc.go +++ b/vendor/nhooyr.io/websocket/doc.go @@ -1,32 +1,32 @@ -// +build !js - -// Package websocket implements the RFC 6455 WebSocket protocol. -// -// https://tools.ietf.org/html/rfc6455 -// -// Use Dial to dial a WebSocket server. -// -// Use Accept to accept a WebSocket client. -// -// Conn represents the resulting WebSocket connection. -// -// The examples are the best way to understand how to correctly use the library. -// -// The wsjson and wspb subpackages contain helpers for JSON and protobuf messages. -// -// More documentation at https://nhooyr.io/websocket. -// -// Wasm -// -// The client side supports compiling to Wasm. -// It wraps the WebSocket browser API. -// -// See https://developer.mozilla.org/en-US/docs/Web/API/WebSocket -// -// Some important caveats to be aware of: -// -// - Accept always errors out -// - Conn.Ping is no-op -// - HTTPClient, HTTPHeader and CompressionMode in DialOptions are no-op -// - *http.Response from Dial is &http.Response{} with a 101 status code on success -package websocket // import "nhooyr.io/websocket" +// +build !js + +// Package websocket implements the RFC 6455 WebSocket protocol. +// +// https://tools.ietf.org/html/rfc6455 +// +// Use Dial to dial a WebSocket server. +// +// Use Accept to accept a WebSocket client. +// +// Conn represents the resulting WebSocket connection. +// +// The examples are the best way to understand how to correctly use the library. +// +// The wsjson and wspb subpackages contain helpers for JSON and protobuf messages. +// +// More documentation at https://nhooyr.io/websocket. +// +// Wasm +// +// The client side supports compiling to Wasm. +// It wraps the WebSocket browser API. 
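// Editor's aside: verifyServerResponse above compares the server's
// Sec-WebSocket-Accept header against secWebSocketAccept(secWebSocketKey),
// a helper outside this hunk. Per RFC 6455 §4.2.2 the value is the base64
// of the SHA-1 of the key concatenated with a fixed GUID; a minimal
// sketch (assumed, not copied from the library):
package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func secWebSocketAccept(secWebSocketKey string) string {
	const magicGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" // fixed by RFC 6455
	h := sha1.Sum([]byte(secWebSocketKey + magicGUID))
	return base64.StdEncoding.EncodeToString(h[:])
}

func main() {
	// Worked example from RFC 6455 §1.3: this key maps to this accept value.
	fmt.Println(secWebSocketAccept("dGhlIHNhbXBsZSBub25jZQ=="))
	// Output: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
}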
+// +// See https://developer.mozilla.org/en-US/docs/Web/API/WebSocket +// +// Some important caveats to be aware of: +// +// - Accept always errors out +// - Conn.Ping is no-op +// - HTTPClient, HTTPHeader and CompressionMode in DialOptions are no-op +// - *http.Response from Dial is &http.Response{} with a 101 status code on success +package websocket // import "nhooyr.io/websocket" diff --git a/vendor/nhooyr.io/websocket/frame.go b/vendor/nhooyr.io/websocket/frame.go index 2a036f944a..fdda3a6bfa 100644 --- a/vendor/nhooyr.io/websocket/frame.go +++ b/vendor/nhooyr.io/websocket/frame.go @@ -1,294 +1,294 @@ -package websocket - -import ( - "bufio" - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" - - "nhooyr.io/websocket/internal/errd" -) - -// opcode represents a WebSocket opcode. -type opcode int - -// https://tools.ietf.org/html/rfc6455#section-11.8. -const ( - opContinuation opcode = iota - opText - opBinary - // 3 - 7 are reserved for further non-control frames. - _ - _ - _ - _ - _ - opClose - opPing - opPong - // 11-16 are reserved for further control frames. -) - -// header represents a WebSocket frame header. -// See https://tools.ietf.org/html/rfc6455#section-5.2. -type header struct { - fin bool - rsv1 bool - rsv2 bool - rsv3 bool - opcode opcode - - payloadLength int64 - - masked bool - maskKey uint32 -} - -// readFrameHeader reads a header from the reader. -// See https://tools.ietf.org/html/rfc6455#section-5.2. -func readFrameHeader(r *bufio.Reader, readBuf []byte) (h header, err error) { - defer errd.Wrap(&err, "failed to read frame header") - - b, err := r.ReadByte() - if err != nil { - return header{}, err - } - - h.fin = b&(1<<7) != 0 - h.rsv1 = b&(1<<6) != 0 - h.rsv2 = b&(1<<5) != 0 - h.rsv3 = b&(1<<4) != 0 - - h.opcode = opcode(b & 0xf) - - b, err = r.ReadByte() - if err != nil { - return header{}, err - } - - h.masked = b&(1<<7) != 0 - - payloadLength := b &^ (1 << 7) - switch { - case payloadLength < 126: - h.payloadLength = int64(payloadLength) - case payloadLength == 126: - _, err = io.ReadFull(r, readBuf[:2]) - h.payloadLength = int64(binary.BigEndian.Uint16(readBuf)) - case payloadLength == 127: - _, err = io.ReadFull(r, readBuf) - h.payloadLength = int64(binary.BigEndian.Uint64(readBuf)) - } - if err != nil { - return header{}, err - } - - if h.payloadLength < 0 { - return header{}, fmt.Errorf("received negative payload length: %v", h.payloadLength) - } - - if h.masked { - _, err = io.ReadFull(r, readBuf[:4]) - if err != nil { - return header{}, err - } - h.maskKey = binary.LittleEndian.Uint32(readBuf) - } - - return h, nil -} - -// maxControlPayload is the maximum length of a control frame payload. -// See https://tools.ietf.org/html/rfc6455#section-5.5. -const maxControlPayload = 125 - -// writeFrameHeader writes the bytes of the header to w. 
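// Editor's aside: a hedged sketch of just the RFC 6455 §5.2 length
// encoding that writeFrameHeader below implements. Lengths under 126 fit
// in the 7 length bits; 126 flags a 16-bit extended length; 127 a 64-bit
// one. The helper name and standalone form are the editor's, not the
// library's.
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

func encodedPayloadLen(n int64) []byte {
	switch {
	case n > math.MaxUint16: // 64-bit extended length
		b := make([]byte, 9)
		b[0] = 127
		binary.BigEndian.PutUint64(b[1:], uint64(n))
		return b
	case n > 125: // 16-bit extended length
		b := make([]byte, 3)
		b[0] = 126
		binary.BigEndian.PutUint16(b[1:], uint16(n))
		return b
	default: // fits directly in the 7 length bits
		return []byte{byte(n)}
	}
}

func main() {
	fmt.Println(encodedPayloadLen(5))     // [5]
	fmt.Println(encodedPayloadLen(300))   // [126 1 44]
	fmt.Println(encodedPayloadLen(70000)) // [127 0 0 0 0 0 1 17 112]
}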
-// See https://tools.ietf.org/html/rfc6455#section-5.2 -func writeFrameHeader(h header, w *bufio.Writer, buf []byte) (err error) { - defer errd.Wrap(&err, "failed to write frame header") - - var b byte - if h.fin { - b |= 1 << 7 - } - if h.rsv1 { - b |= 1 << 6 - } - if h.rsv2 { - b |= 1 << 5 - } - if h.rsv3 { - b |= 1 << 4 - } - - b |= byte(h.opcode) - - err = w.WriteByte(b) - if err != nil { - return err - } - - lengthByte := byte(0) - if h.masked { - lengthByte |= 1 << 7 - } - - switch { - case h.payloadLength > math.MaxUint16: - lengthByte |= 127 - case h.payloadLength > 125: - lengthByte |= 126 - case h.payloadLength >= 0: - lengthByte |= byte(h.payloadLength) - } - err = w.WriteByte(lengthByte) - if err != nil { - return err - } - - switch { - case h.payloadLength > math.MaxUint16: - binary.BigEndian.PutUint64(buf, uint64(h.payloadLength)) - _, err = w.Write(buf) - case h.payloadLength > 125: - binary.BigEndian.PutUint16(buf, uint16(h.payloadLength)) - _, err = w.Write(buf[:2]) - } - if err != nil { - return err - } - - if h.masked { - binary.LittleEndian.PutUint32(buf, h.maskKey) - _, err = w.Write(buf[:4]) - if err != nil { - return err - } - } - - return nil -} - -// mask applies the WebSocket masking algorithm to p -// with the given key. -// See https://tools.ietf.org/html/rfc6455#section-5.3 -// -// The returned value is the correctly rotated key to -// to continue to mask/unmask the message. -// -// It is optimized for LittleEndian and expects the key -// to be in little endian. -// -// See https://github.com/golang/go/issues/31586 -func mask(key uint32, b []byte) uint32 { - if len(b) >= 8 { - key64 := uint64(key)<<32 | uint64(key) - - // At some point in the future we can clean these unrolled loops up. - // See https://github.com/golang/go/issues/31586#issuecomment-487436401 - - // Then we xor until b is less than 128 bytes. - for len(b) >= 128 { - v := binary.LittleEndian.Uint64(b) - binary.LittleEndian.PutUint64(b, v^key64) - v = binary.LittleEndian.Uint64(b[8:16]) - binary.LittleEndian.PutUint64(b[8:16], v^key64) - v = binary.LittleEndian.Uint64(b[16:24]) - binary.LittleEndian.PutUint64(b[16:24], v^key64) - v = binary.LittleEndian.Uint64(b[24:32]) - binary.LittleEndian.PutUint64(b[24:32], v^key64) - v = binary.LittleEndian.Uint64(b[32:40]) - binary.LittleEndian.PutUint64(b[32:40], v^key64) - v = binary.LittleEndian.Uint64(b[40:48]) - binary.LittleEndian.PutUint64(b[40:48], v^key64) - v = binary.LittleEndian.Uint64(b[48:56]) - binary.LittleEndian.PutUint64(b[48:56], v^key64) - v = binary.LittleEndian.Uint64(b[56:64]) - binary.LittleEndian.PutUint64(b[56:64], v^key64) - v = binary.LittleEndian.Uint64(b[64:72]) - binary.LittleEndian.PutUint64(b[64:72], v^key64) - v = binary.LittleEndian.Uint64(b[72:80]) - binary.LittleEndian.PutUint64(b[72:80], v^key64) - v = binary.LittleEndian.Uint64(b[80:88]) - binary.LittleEndian.PutUint64(b[80:88], v^key64) - v = binary.LittleEndian.Uint64(b[88:96]) - binary.LittleEndian.PutUint64(b[88:96], v^key64) - v = binary.LittleEndian.Uint64(b[96:104]) - binary.LittleEndian.PutUint64(b[96:104], v^key64) - v = binary.LittleEndian.Uint64(b[104:112]) - binary.LittleEndian.PutUint64(b[104:112], v^key64) - v = binary.LittleEndian.Uint64(b[112:120]) - binary.LittleEndian.PutUint64(b[112:120], v^key64) - v = binary.LittleEndian.Uint64(b[120:128]) - binary.LittleEndian.PutUint64(b[120:128], v^key64) - b = b[128:] - } - - // Then we xor until b is less than 64 bytes. 
- for len(b) >= 64 { - v := binary.LittleEndian.Uint64(b) - binary.LittleEndian.PutUint64(b, v^key64) - v = binary.LittleEndian.Uint64(b[8:16]) - binary.LittleEndian.PutUint64(b[8:16], v^key64) - v = binary.LittleEndian.Uint64(b[16:24]) - binary.LittleEndian.PutUint64(b[16:24], v^key64) - v = binary.LittleEndian.Uint64(b[24:32]) - binary.LittleEndian.PutUint64(b[24:32], v^key64) - v = binary.LittleEndian.Uint64(b[32:40]) - binary.LittleEndian.PutUint64(b[32:40], v^key64) - v = binary.LittleEndian.Uint64(b[40:48]) - binary.LittleEndian.PutUint64(b[40:48], v^key64) - v = binary.LittleEndian.Uint64(b[48:56]) - binary.LittleEndian.PutUint64(b[48:56], v^key64) - v = binary.LittleEndian.Uint64(b[56:64]) - binary.LittleEndian.PutUint64(b[56:64], v^key64) - b = b[64:] - } - - // Then we xor until b is less than 32 bytes. - for len(b) >= 32 { - v := binary.LittleEndian.Uint64(b) - binary.LittleEndian.PutUint64(b, v^key64) - v = binary.LittleEndian.Uint64(b[8:16]) - binary.LittleEndian.PutUint64(b[8:16], v^key64) - v = binary.LittleEndian.Uint64(b[16:24]) - binary.LittleEndian.PutUint64(b[16:24], v^key64) - v = binary.LittleEndian.Uint64(b[24:32]) - binary.LittleEndian.PutUint64(b[24:32], v^key64) - b = b[32:] - } - - // Then we xor until b is less than 16 bytes. - for len(b) >= 16 { - v := binary.LittleEndian.Uint64(b) - binary.LittleEndian.PutUint64(b, v^key64) - v = binary.LittleEndian.Uint64(b[8:16]) - binary.LittleEndian.PutUint64(b[8:16], v^key64) - b = b[16:] - } - - // Then we xor until b is less than 8 bytes. - for len(b) >= 8 { - v := binary.LittleEndian.Uint64(b) - binary.LittleEndian.PutUint64(b, v^key64) - b = b[8:] - } - } - - // Then we xor until b is less than 4 bytes. - for len(b) >= 4 { - v := binary.LittleEndian.Uint32(b) - binary.LittleEndian.PutUint32(b, v^key) - b = b[4:] - } - - // xor remaining bytes. - for i := range b { - b[i] ^= byte(key) - key = bits.RotateLeft32(key, -8) - } - - return key -} +package websocket + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" + + "nhooyr.io/websocket/internal/errd" +) + +// opcode represents a WebSocket opcode. +type opcode int + +// https://tools.ietf.org/html/rfc6455#section-11.8. +const ( + opContinuation opcode = iota + opText + opBinary + // 3 - 7 are reserved for further non-control frames. + _ + _ + _ + _ + _ + opClose + opPing + opPong + // 11-16 are reserved for further control frames. +) + +// header represents a WebSocket frame header. +// See https://tools.ietf.org/html/rfc6455#section-5.2. +type header struct { + fin bool + rsv1 bool + rsv2 bool + rsv3 bool + opcode opcode + + payloadLength int64 + + masked bool + maskKey uint32 +} + +// readFrameHeader reads a header from the reader. +// See https://tools.ietf.org/html/rfc6455#section-5.2. 
+func readFrameHeader(r *bufio.Reader, readBuf []byte) (h header, err error) { + defer errd.Wrap(&err, "failed to read frame header") + + b, err := r.ReadByte() + if err != nil { + return header{}, err + } + + h.fin = b&(1<<7) != 0 + h.rsv1 = b&(1<<6) != 0 + h.rsv2 = b&(1<<5) != 0 + h.rsv3 = b&(1<<4) != 0 + + h.opcode = opcode(b & 0xf) + + b, err = r.ReadByte() + if err != nil { + return header{}, err + } + + h.masked = b&(1<<7) != 0 + + payloadLength := b &^ (1 << 7) + switch { + case payloadLength < 126: + h.payloadLength = int64(payloadLength) + case payloadLength == 126: + _, err = io.ReadFull(r, readBuf[:2]) + h.payloadLength = int64(binary.BigEndian.Uint16(readBuf)) + case payloadLength == 127: + _, err = io.ReadFull(r, readBuf) + h.payloadLength = int64(binary.BigEndian.Uint64(readBuf)) + } + if err != nil { + return header{}, err + } + + if h.payloadLength < 0 { + return header{}, fmt.Errorf("received negative payload length: %v", h.payloadLength) + } + + if h.masked { + _, err = io.ReadFull(r, readBuf[:4]) + if err != nil { + return header{}, err + } + h.maskKey = binary.LittleEndian.Uint32(readBuf) + } + + return h, nil +} + +// maxControlPayload is the maximum length of a control frame payload. +// See https://tools.ietf.org/html/rfc6455#section-5.5. +const maxControlPayload = 125 + +// writeFrameHeader writes the bytes of the header to w. +// See https://tools.ietf.org/html/rfc6455#section-5.2 +func writeFrameHeader(h header, w *bufio.Writer, buf []byte) (err error) { + defer errd.Wrap(&err, "failed to write frame header") + + var b byte + if h.fin { + b |= 1 << 7 + } + if h.rsv1 { + b |= 1 << 6 + } + if h.rsv2 { + b |= 1 << 5 + } + if h.rsv3 { + b |= 1 << 4 + } + + b |= byte(h.opcode) + + err = w.WriteByte(b) + if err != nil { + return err + } + + lengthByte := byte(0) + if h.masked { + lengthByte |= 1 << 7 + } + + switch { + case h.payloadLength > math.MaxUint16: + lengthByte |= 127 + case h.payloadLength > 125: + lengthByte |= 126 + case h.payloadLength >= 0: + lengthByte |= byte(h.payloadLength) + } + err = w.WriteByte(lengthByte) + if err != nil { + return err + } + + switch { + case h.payloadLength > math.MaxUint16: + binary.BigEndian.PutUint64(buf, uint64(h.payloadLength)) + _, err = w.Write(buf) + case h.payloadLength > 125: + binary.BigEndian.PutUint16(buf, uint16(h.payloadLength)) + _, err = w.Write(buf[:2]) + } + if err != nil { + return err + } + + if h.masked { + binary.LittleEndian.PutUint32(buf, h.maskKey) + _, err = w.Write(buf[:4]) + if err != nil { + return err + } + } + + return nil +} + +// mask applies the WebSocket masking algorithm to p +// with the given key. +// See https://tools.ietf.org/html/rfc6455#section-5.3 +// +// The returned value is the correctly rotated key to +// to continue to mask/unmask the message. +// +// It is optimized for LittleEndian and expects the key +// to be in little endian. +// +// See https://github.com/golang/go/issues/31586 +func mask(key uint32, b []byte) uint32 { + if len(b) >= 8 { + key64 := uint64(key)<<32 | uint64(key) + + // At some point in the future we can clean these unrolled loops up. + // See https://github.com/golang/go/issues/31586#issuecomment-487436401 + + // Then we xor until b is less than 128 bytes. 
+ for len(b) >= 128 { + v := binary.LittleEndian.Uint64(b) + binary.LittleEndian.PutUint64(b, v^key64) + v = binary.LittleEndian.Uint64(b[8:16]) + binary.LittleEndian.PutUint64(b[8:16], v^key64) + v = binary.LittleEndian.Uint64(b[16:24]) + binary.LittleEndian.PutUint64(b[16:24], v^key64) + v = binary.LittleEndian.Uint64(b[24:32]) + binary.LittleEndian.PutUint64(b[24:32], v^key64) + v = binary.LittleEndian.Uint64(b[32:40]) + binary.LittleEndian.PutUint64(b[32:40], v^key64) + v = binary.LittleEndian.Uint64(b[40:48]) + binary.LittleEndian.PutUint64(b[40:48], v^key64) + v = binary.LittleEndian.Uint64(b[48:56]) + binary.LittleEndian.PutUint64(b[48:56], v^key64) + v = binary.LittleEndian.Uint64(b[56:64]) + binary.LittleEndian.PutUint64(b[56:64], v^key64) + v = binary.LittleEndian.Uint64(b[64:72]) + binary.LittleEndian.PutUint64(b[64:72], v^key64) + v = binary.LittleEndian.Uint64(b[72:80]) + binary.LittleEndian.PutUint64(b[72:80], v^key64) + v = binary.LittleEndian.Uint64(b[80:88]) + binary.LittleEndian.PutUint64(b[80:88], v^key64) + v = binary.LittleEndian.Uint64(b[88:96]) + binary.LittleEndian.PutUint64(b[88:96], v^key64) + v = binary.LittleEndian.Uint64(b[96:104]) + binary.LittleEndian.PutUint64(b[96:104], v^key64) + v = binary.LittleEndian.Uint64(b[104:112]) + binary.LittleEndian.PutUint64(b[104:112], v^key64) + v = binary.LittleEndian.Uint64(b[112:120]) + binary.LittleEndian.PutUint64(b[112:120], v^key64) + v = binary.LittleEndian.Uint64(b[120:128]) + binary.LittleEndian.PutUint64(b[120:128], v^key64) + b = b[128:] + } + + // Then we xor until b is less than 64 bytes. + for len(b) >= 64 { + v := binary.LittleEndian.Uint64(b) + binary.LittleEndian.PutUint64(b, v^key64) + v = binary.LittleEndian.Uint64(b[8:16]) + binary.LittleEndian.PutUint64(b[8:16], v^key64) + v = binary.LittleEndian.Uint64(b[16:24]) + binary.LittleEndian.PutUint64(b[16:24], v^key64) + v = binary.LittleEndian.Uint64(b[24:32]) + binary.LittleEndian.PutUint64(b[24:32], v^key64) + v = binary.LittleEndian.Uint64(b[32:40]) + binary.LittleEndian.PutUint64(b[32:40], v^key64) + v = binary.LittleEndian.Uint64(b[40:48]) + binary.LittleEndian.PutUint64(b[40:48], v^key64) + v = binary.LittleEndian.Uint64(b[48:56]) + binary.LittleEndian.PutUint64(b[48:56], v^key64) + v = binary.LittleEndian.Uint64(b[56:64]) + binary.LittleEndian.PutUint64(b[56:64], v^key64) + b = b[64:] + } + + // Then we xor until b is less than 32 bytes. + for len(b) >= 32 { + v := binary.LittleEndian.Uint64(b) + binary.LittleEndian.PutUint64(b, v^key64) + v = binary.LittleEndian.Uint64(b[8:16]) + binary.LittleEndian.PutUint64(b[8:16], v^key64) + v = binary.LittleEndian.Uint64(b[16:24]) + binary.LittleEndian.PutUint64(b[16:24], v^key64) + v = binary.LittleEndian.Uint64(b[24:32]) + binary.LittleEndian.PutUint64(b[24:32], v^key64) + b = b[32:] + } + + // Then we xor until b is less than 16 bytes. + for len(b) >= 16 { + v := binary.LittleEndian.Uint64(b) + binary.LittleEndian.PutUint64(b, v^key64) + v = binary.LittleEndian.Uint64(b[8:16]) + binary.LittleEndian.PutUint64(b[8:16], v^key64) + b = b[16:] + } + + // Then we xor until b is less than 8 bytes. + for len(b) >= 8 { + v := binary.LittleEndian.Uint64(b) + binary.LittleEndian.PutUint64(b, v^key64) + b = b[8:] + } + } + + // Then we xor until b is less than 4 bytes. + for len(b) >= 4 { + v := binary.LittleEndian.Uint32(b) + binary.LittleEndian.PutUint32(b, v^key) + b = b[4:] + } + + // xor remaining bytes. 
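// Editor's aside: the byte-wise tail below rotates the key as it goes,
// and mask returns that rotated key so masking can resume mid-message
// (frames are read in chunks). A hedged property check; the helper is the
// editor's and assumes "bytes" is imported alongside this file's imports.
func maskSplitEqualsWhole(key uint32, p []byte, split int) bool {
	whole := append([]byte(nil), p...)
	chunked := append([]byte(nil), p...)
	mask(key, whole)                   // mask everything in one call
	next := mask(key, chunked[:split]) // mask the first chunk...
	mask(next, chunked[split:])        // ...then resume with the rotated key
	return bytes.Equal(whole, chunked)
}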
+ for i := range b { + b[i] ^= byte(key) + key = bits.RotateLeft32(key, -8) + } + + return key +} diff --git a/vendor/nhooyr.io/websocket/internal/bpool/bpool.go b/vendor/nhooyr.io/websocket/internal/bpool/bpool.go index aa826fba2b..f694aed1d7 100644 --- a/vendor/nhooyr.io/websocket/internal/bpool/bpool.go +++ b/vendor/nhooyr.io/websocket/internal/bpool/bpool.go @@ -1,24 +1,24 @@ -package bpool - -import ( - "bytes" - "sync" -) - -var bpool sync.Pool - -// Get returns a buffer from the pool or creates a new one if -// the pool is empty. -func Get() *bytes.Buffer { - b := bpool.Get() - if b == nil { - return &bytes.Buffer{} - } - return b.(*bytes.Buffer) -} - -// Put returns a buffer into the pool. -func Put(b *bytes.Buffer) { - b.Reset() - bpool.Put(b) -} +package bpool + +import ( + "bytes" + "sync" +) + +var bpool sync.Pool + +// Get returns a buffer from the pool or creates a new one if +// the pool is empty. +func Get() *bytes.Buffer { + b := bpool.Get() + if b == nil { + return &bytes.Buffer{} + } + return b.(*bytes.Buffer) +} + +// Put returns a buffer into the pool. +func Put(b *bytes.Buffer) { + b.Reset() + bpool.Put(b) +} diff --git a/vendor/nhooyr.io/websocket/internal/errd/wrap.go b/vendor/nhooyr.io/websocket/internal/errd/wrap.go index 6e779131af..ab95afed62 100644 --- a/vendor/nhooyr.io/websocket/internal/errd/wrap.go +++ b/vendor/nhooyr.io/websocket/internal/errd/wrap.go @@ -1,14 +1,14 @@ -package errd - -import ( - "fmt" -) - -// Wrap wraps err with fmt.Errorf if err is non nil. -// Intended for use with defer and a named error return. -// Inspired by https://github.com/golang/go/issues/32676. -func Wrap(err *error, f string, v ...interface{}) { - if *err != nil { - *err = fmt.Errorf(f+": %w", append(v, *err)...) - } -} +package errd + +import ( + "fmt" +) + +// Wrap wraps err with fmt.Errorf if err is non nil. +// Intended for use with defer and a named error return. +// Inspired by https://github.com/golang/go/issues/32676. +func Wrap(err *error, f string, v ...interface{}) { + if *err != nil { + *err = fmt.Errorf(f+": %w", append(v, *err)...) + } +} diff --git a/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go b/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go index 26ffb45625..096504c426 100644 --- a/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go +++ b/vendor/nhooyr.io/websocket/internal/wsjs/wsjs_js.go @@ -1,170 +1,170 @@ -// +build js - -// Package wsjs implements typed access to the browser javascript WebSocket API. -// -// https://developer.mozilla.org/en-US/docs/Web/API/WebSocket -package wsjs - -import ( - "syscall/js" -) - -func handleJSError(err *error, onErr func()) { - r := recover() - - if jsErr, ok := r.(js.Error); ok { - *err = jsErr - - if onErr != nil { - onErr() - } - return - } - - if r != nil { - panic(r) - } -} - -// New is a wrapper around the javascript WebSocket constructor. -func New(url string, protocols []string) (c WebSocket, err error) { - defer handleJSError(&err, func() { - c = WebSocket{} - }) - - jsProtocols := make([]interface{}, len(protocols)) - for i, p := range protocols { - jsProtocols[i] = p - } - - c = WebSocket{ - v: js.Global().Get("WebSocket").New(url, jsProtocols), - } - - c.setBinaryType("arraybuffer") - - return c, nil -} - -// WebSocket is a wrapper around a javascript WebSocket object. 
-type WebSocket struct { - v js.Value -} - -func (c WebSocket) setBinaryType(typ string) { - c.v.Set("binaryType", string(typ)) -} - -func (c WebSocket) addEventListener(eventType string, fn func(e js.Value)) func() { - f := js.FuncOf(func(this js.Value, args []js.Value) interface{} { - fn(args[0]) - return nil - }) - c.v.Call("addEventListener", eventType, f) - - return func() { - c.v.Call("removeEventListener", eventType, f) - f.Release() - } -} - -// CloseEvent is the type passed to a WebSocket close handler. -type CloseEvent struct { - Code uint16 - Reason string - WasClean bool -} - -// OnClose registers a function to be called when the WebSocket is closed. -func (c WebSocket) OnClose(fn func(CloseEvent)) (remove func()) { - return c.addEventListener("close", func(e js.Value) { - ce := CloseEvent{ - Code: uint16(e.Get("code").Int()), - Reason: e.Get("reason").String(), - WasClean: e.Get("wasClean").Bool(), - } - fn(ce) - }) -} - -// OnError registers a function to be called when there is an error -// with the WebSocket. -func (c WebSocket) OnError(fn func(e js.Value)) (remove func()) { - return c.addEventListener("error", fn) -} - -// MessageEvent is the type passed to a message handler. -type MessageEvent struct { - // string or []byte. - Data interface{} - - // There are more fields to the interface but we don't use them. - // See https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent -} - -// OnMessage registers a function to be called when the WebSocket receives a message. -func (c WebSocket) OnMessage(fn func(m MessageEvent)) (remove func()) { - return c.addEventListener("message", func(e js.Value) { - var data interface{} - - arrayBuffer := e.Get("data") - if arrayBuffer.Type() == js.TypeString { - data = arrayBuffer.String() - } else { - data = extractArrayBuffer(arrayBuffer) - } - - me := MessageEvent{ - Data: data, - } - fn(me) - - return - }) -} - -// Subprotocol returns the WebSocket subprotocol in use. -func (c WebSocket) Subprotocol() string { - return c.v.Get("protocol").String() -} - -// OnOpen registers a function to be called when the WebSocket is opened. -func (c WebSocket) OnOpen(fn func(e js.Value)) (remove func()) { - return c.addEventListener("open", fn) -} - -// Close closes the WebSocket with the given code and reason. -func (c WebSocket) Close(code int, reason string) (err error) { - defer handleJSError(&err, nil) - c.v.Call("close", code, reason) - return err -} - -// SendText sends the given string as a text message -// on the WebSocket. -func (c WebSocket) SendText(v string) (err error) { - defer handleJSError(&err, nil) - c.v.Call("send", v) - return err -} - -// SendBytes sends the given message as a binary message -// on the WebSocket. -func (c WebSocket) SendBytes(v []byte) (err error) { - defer handleJSError(&err, nil) - c.v.Call("send", uint8Array(v)) - return err -} - -func extractArrayBuffer(arrayBuffer js.Value) []byte { - uint8Array := js.Global().Get("Uint8Array").New(arrayBuffer) - dst := make([]byte, uint8Array.Length()) - js.CopyBytesToGo(dst, uint8Array) - return dst -} - -func uint8Array(src []byte) js.Value { - uint8Array := js.Global().Get("Uint8Array").New(len(src)) - js.CopyBytesToJS(uint8Array, src) - return uint8Array -} +// +build js + +// Package wsjs implements typed access to the browser javascript WebSocket API. 
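// +build js

// Editor's aside: a hedged sketch of driving this wrapper directly under
// GOOS=js GOARCH=wasm. wsjs is an internal package, so real programs go
// through websocket.Dial instead; the echo URL is illustrative.
package main

import (
	"fmt"
	"syscall/js"

	"nhooyr.io/websocket/internal/wsjs"
)

func main() {
	ws, err := wsjs.New("wss://echo.example.com", nil)
	if err != nil {
		panic(err)
	}
	ws.OnMessage(func(m wsjs.MessageEvent) {
		fmt.Println("received:", m.Data) // string for text, []byte for binary
	})
	ws.OnOpen(func(js.Value) {
		_ = ws.SendText("hello") // errors only if the browser throws
	})
	select {} // block; the registered callbacks do the work
}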
+// +// https://developer.mozilla.org/en-US/docs/Web/API/WebSocket +package wsjs + +import ( + "syscall/js" +) + +func handleJSError(err *error, onErr func()) { + r := recover() + + if jsErr, ok := r.(js.Error); ok { + *err = jsErr + + if onErr != nil { + onErr() + } + return + } + + if r != nil { + panic(r) + } +} + +// New is a wrapper around the javascript WebSocket constructor. +func New(url string, protocols []string) (c WebSocket, err error) { + defer handleJSError(&err, func() { + c = WebSocket{} + }) + + jsProtocols := make([]interface{}, len(protocols)) + for i, p := range protocols { + jsProtocols[i] = p + } + + c = WebSocket{ + v: js.Global().Get("WebSocket").New(url, jsProtocols), + } + + c.setBinaryType("arraybuffer") + + return c, nil +} + +// WebSocket is a wrapper around a javascript WebSocket object. +type WebSocket struct { + v js.Value +} + +func (c WebSocket) setBinaryType(typ string) { + c.v.Set("binaryType", string(typ)) +} + +func (c WebSocket) addEventListener(eventType string, fn func(e js.Value)) func() { + f := js.FuncOf(func(this js.Value, args []js.Value) interface{} { + fn(args[0]) + return nil + }) + c.v.Call("addEventListener", eventType, f) + + return func() { + c.v.Call("removeEventListener", eventType, f) + f.Release() + } +} + +// CloseEvent is the type passed to a WebSocket close handler. +type CloseEvent struct { + Code uint16 + Reason string + WasClean bool +} + +// OnClose registers a function to be called when the WebSocket is closed. +func (c WebSocket) OnClose(fn func(CloseEvent)) (remove func()) { + return c.addEventListener("close", func(e js.Value) { + ce := CloseEvent{ + Code: uint16(e.Get("code").Int()), + Reason: e.Get("reason").String(), + WasClean: e.Get("wasClean").Bool(), + } + fn(ce) + }) +} + +// OnError registers a function to be called when there is an error +// with the WebSocket. +func (c WebSocket) OnError(fn func(e js.Value)) (remove func()) { + return c.addEventListener("error", fn) +} + +// MessageEvent is the type passed to a message handler. +type MessageEvent struct { + // string or []byte. + Data interface{} + + // There are more fields to the interface but we don't use them. + // See https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent +} + +// OnMessage registers a function to be called when the WebSocket receives a message. +func (c WebSocket) OnMessage(fn func(m MessageEvent)) (remove func()) { + return c.addEventListener("message", func(e js.Value) { + var data interface{} + + arrayBuffer := e.Get("data") + if arrayBuffer.Type() == js.TypeString { + data = arrayBuffer.String() + } else { + data = extractArrayBuffer(arrayBuffer) + } + + me := MessageEvent{ + Data: data, + } + fn(me) + + return + }) +} + +// Subprotocol returns the WebSocket subprotocol in use. +func (c WebSocket) Subprotocol() string { + return c.v.Get("protocol").String() +} + +// OnOpen registers a function to be called when the WebSocket is opened. +func (c WebSocket) OnOpen(fn func(e js.Value)) (remove func()) { + return c.addEventListener("open", fn) +} + +// Close closes the WebSocket with the given code and reason. +func (c WebSocket) Close(code int, reason string) (err error) { + defer handleJSError(&err, nil) + c.v.Call("close", code, reason) + return err +} + +// SendText sends the given string as a text message +// on the WebSocket. 
+func (c WebSocket) SendText(v string) (err error) { + defer handleJSError(&err, nil) + c.v.Call("send", v) + return err +} + +// SendBytes sends the given message as a binary message +// on the WebSocket. +func (c WebSocket) SendBytes(v []byte) (err error) { + defer handleJSError(&err, nil) + c.v.Call("send", uint8Array(v)) + return err +} + +func extractArrayBuffer(arrayBuffer js.Value) []byte { + uint8Array := js.Global().Get("Uint8Array").New(arrayBuffer) + dst := make([]byte, uint8Array.Length()) + js.CopyBytesToGo(dst, uint8Array) + return dst +} + +func uint8Array(src []byte) js.Value { + uint8Array := js.Global().Get("Uint8Array").New(len(src)) + js.CopyBytesToJS(uint8Array, src) + return uint8Array +} diff --git a/vendor/nhooyr.io/websocket/internal/xsync/go.go b/vendor/nhooyr.io/websocket/internal/xsync/go.go index 7a61f27fa2..8e026b6ade 100644 --- a/vendor/nhooyr.io/websocket/internal/xsync/go.go +++ b/vendor/nhooyr.io/websocket/internal/xsync/go.go @@ -1,25 +1,25 @@ -package xsync - -import ( - "fmt" -) - -// Go allows running a function in another goroutine -// and waiting for its error. -func Go(fn func() error) <-chan error { - errs := make(chan error, 1) - go func() { - defer func() { - r := recover() - if r != nil { - select { - case errs <- fmt.Errorf("panic in go fn: %v", r): - default: - } - } - }() - errs <- fn() - }() - - return errs -} +package xsync + +import ( + "fmt" +) + +// Go allows running a function in another goroutine +// and waiting for its error. +func Go(fn func() error) <-chan error { + errs := make(chan error, 1) + go func() { + defer func() { + r := recover() + if r != nil { + select { + case errs <- fmt.Errorf("panic in go fn: %v", r): + default: + } + } + }() + errs <- fn() + }() + + return errs +} diff --git a/vendor/nhooyr.io/websocket/internal/xsync/int64.go b/vendor/nhooyr.io/websocket/internal/xsync/int64.go index a0c4020415..029f73a218 100644 --- a/vendor/nhooyr.io/websocket/internal/xsync/int64.go +++ b/vendor/nhooyr.io/websocket/internal/xsync/int64.go @@ -1,23 +1,23 @@ -package xsync - -import ( - "sync/atomic" -) - -// Int64 represents an atomic int64. -type Int64 struct { - // We do not use atomic.Load/StoreInt64 since it does not - // work on 32 bit computers but we need 64 bit integers. - i atomic.Value -} - -// Load loads the int64. -func (v *Int64) Load() int64 { - i, _ := v.i.Load().(int64) - return i -} - -// Store stores the int64. -func (v *Int64) Store(i int64) { - v.i.Store(i) -} +package xsync + +import ( + "sync/atomic" +) + +// Int64 represents an atomic int64. +type Int64 struct { + // We do not use atomic.Load/StoreInt64 since it does not + // work on 32 bit computers but we need 64 bit integers. + i atomic.Value +} + +// Load loads the int64. +func (v *Int64) Load() int64 { + i, _ := v.i.Load().(int64) + return i +} + +// Store stores the int64. +func (v *Int64) Store(i int64) { + v.i.Store(i) +} diff --git a/vendor/nhooyr.io/websocket/netconn.go b/vendor/nhooyr.io/websocket/netconn.go index 64aadf0b99..7ab6f91195 100644 --- a/vendor/nhooyr.io/websocket/netconn.go +++ b/vendor/nhooyr.io/websocket/netconn.go @@ -1,166 +1,166 @@ -package websocket - -import ( - "context" - "fmt" - "io" - "math" - "net" - "sync" - "time" -) - -// NetConn converts a *websocket.Conn into a net.Conn. -// -// It's for tunneling arbitrary protocols over WebSockets. -// Few users of the library will need this but it's tricky to implement -// correctly and so provided in the library. -// See https://github.com/nhooyr/websocket/issues/100. 
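// Editor's aside: a hedged usage sketch of the internal xsync.Go helper
// diffed above. It runs fn in a goroutine and reports its error (or a
// recovered panic) on a buffered channel, so callers can select against a
// context. The function name and imports (context, xsync) are the editor's.
func runWithContext(ctx context.Context, fn func() error) error {
	errs := xsync.Go(fn)
	select {
	case err := <-errs:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}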
-// -// Every Write to the net.Conn will correspond to a message write of -// the given type on *websocket.Conn. -// -// The passed ctx bounds the lifetime of the net.Conn. If cancelled, -// all reads and writes on the net.Conn will be cancelled. -// -// If a message is read that is not of the correct type, the connection -// will be closed with StatusUnsupportedData and an error will be returned. -// -// Close will close the *websocket.Conn with StatusNormalClosure. -// -// When a deadline is hit, the connection will be closed. This is -// different from most net.Conn implementations where only the -// reading/writing goroutines are interrupted but the connection is kept alive. -// -// The Addr methods will return a mock net.Addr that returns "websocket" for Network -// and "websocket/unknown-addr" for String. -// -// A received StatusNormalClosure or StatusGoingAway close frame will be translated to -// io.EOF when reading. -func NetConn(ctx context.Context, c *Conn, msgType MessageType) net.Conn { - nc := &netConn{ - c: c, - msgType: msgType, - } - - var cancel context.CancelFunc - nc.writeContext, cancel = context.WithCancel(ctx) - nc.writeTimer = time.AfterFunc(math.MaxInt64, cancel) - if !nc.writeTimer.Stop() { - <-nc.writeTimer.C - } - - nc.readContext, cancel = context.WithCancel(ctx) - nc.readTimer = time.AfterFunc(math.MaxInt64, cancel) - if !nc.readTimer.Stop() { - <-nc.readTimer.C - } - - return nc -} - -type netConn struct { - c *Conn - msgType MessageType - - writeTimer *time.Timer - writeContext context.Context - - readTimer *time.Timer - readContext context.Context - - readMu sync.Mutex - eofed bool - reader io.Reader -} - -var _ net.Conn = &netConn{} - -func (c *netConn) Close() error { - return c.c.Close(StatusNormalClosure, "") -} - -func (c *netConn) Write(p []byte) (int, error) { - err := c.c.Write(c.writeContext, c.msgType, p) - if err != nil { - return 0, err - } - return len(p), nil -} - -func (c *netConn) Read(p []byte) (int, error) { - c.readMu.Lock() - defer c.readMu.Unlock() - - if c.eofed { - return 0, io.EOF - } - - if c.reader == nil { - typ, r, err := c.c.Reader(c.readContext) - if err != nil { - switch CloseStatus(err) { - case StatusNormalClosure, StatusGoingAway: - c.eofed = true - return 0, io.EOF - } - return 0, err - } - if typ != c.msgType { - err := fmt.Errorf("unexpected frame type read (expected %v): %v", c.msgType, typ) - c.c.Close(StatusUnsupportedData, err.Error()) - return 0, err - } - c.reader = r - } - - n, err := c.reader.Read(p) - if err == io.EOF { - c.reader = nil - err = nil - } - return n, err -} - -type websocketAddr struct { -} - -func (a websocketAddr) Network() string { - return "websocket" -} - -func (a websocketAddr) String() string { - return "websocket/unknown-addr" -} - -func (c *netConn) RemoteAddr() net.Addr { - return websocketAddr{} -} - -func (c *netConn) LocalAddr() net.Addr { - return websocketAddr{} -} - -func (c *netConn) SetDeadline(t time.Time) error { - c.SetWriteDeadline(t) - c.SetReadDeadline(t) - return nil -} - -func (c *netConn) SetWriteDeadline(t time.Time) error { - if t.IsZero() { - c.writeTimer.Stop() - } else { - c.writeTimer.Reset(t.Sub(time.Now())) - } - return nil -} - -func (c *netConn) SetReadDeadline(t time.Time) error { - if t.IsZero() { - c.readTimer.Stop() - } else { - c.readTimer.Reset(t.Sub(time.Now())) - } - return nil -} +package websocket + +import ( + "context" + "fmt" + "io" + "math" + "net" + "sync" + "time" +) + +// NetConn converts a *websocket.Conn into a net.Conn. 
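// Editor's aside: a hedged sketch of tunneling another protocol over the
// adapter documented here. The URL is illustrative; NetConn, Dial and
// MessageBinary are from this package (imports: context, net, websocket).
func dialTunnel(ctx context.Context) (net.Conn, error) {
	c, _, err := websocket.Dial(ctx, "wss://proxy.example.com/tunnel", nil)
	if err != nil {
		return nil, err
	}
	// Every Write becomes one binary message; Close sends
	// StatusNormalClosure, and deadlines close the connection outright.
	return websocket.NetConn(ctx, c, websocket.MessageBinary), nil
}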
+// +// It's for tunneling arbitrary protocols over WebSockets. +// Few users of the library will need this but it's tricky to implement +// correctly and so provided in the library. +// See https://github.com/nhooyr/websocket/issues/100. +// +// Every Write to the net.Conn will correspond to a message write of +// the given type on *websocket.Conn. +// +// The passed ctx bounds the lifetime of the net.Conn. If cancelled, +// all reads and writes on the net.Conn will be cancelled. +// +// If a message is read that is not of the correct type, the connection +// will be closed with StatusUnsupportedData and an error will be returned. +// +// Close will close the *websocket.Conn with StatusNormalClosure. +// +// When a deadline is hit, the connection will be closed. This is +// different from most net.Conn implementations where only the +// reading/writing goroutines are interrupted but the connection is kept alive. +// +// The Addr methods will return a mock net.Addr that returns "websocket" for Network +// and "websocket/unknown-addr" for String. +// +// A received StatusNormalClosure or StatusGoingAway close frame will be translated to +// io.EOF when reading. +func NetConn(ctx context.Context, c *Conn, msgType MessageType) net.Conn { + nc := &netConn{ + c: c, + msgType: msgType, + } + + var cancel context.CancelFunc + nc.writeContext, cancel = context.WithCancel(ctx) + nc.writeTimer = time.AfterFunc(math.MaxInt64, cancel) + if !nc.writeTimer.Stop() { + <-nc.writeTimer.C + } + + nc.readContext, cancel = context.WithCancel(ctx) + nc.readTimer = time.AfterFunc(math.MaxInt64, cancel) + if !nc.readTimer.Stop() { + <-nc.readTimer.C + } + + return nc +} + +type netConn struct { + c *Conn + msgType MessageType + + writeTimer *time.Timer + writeContext context.Context + + readTimer *time.Timer + readContext context.Context + + readMu sync.Mutex + eofed bool + reader io.Reader +} + +var _ net.Conn = &netConn{} + +func (c *netConn) Close() error { + return c.c.Close(StatusNormalClosure, "") +} + +func (c *netConn) Write(p []byte) (int, error) { + err := c.c.Write(c.writeContext, c.msgType, p) + if err != nil { + return 0, err + } + return len(p), nil +} + +func (c *netConn) Read(p []byte) (int, error) { + c.readMu.Lock() + defer c.readMu.Unlock() + + if c.eofed { + return 0, io.EOF + } + + if c.reader == nil { + typ, r, err := c.c.Reader(c.readContext) + if err != nil { + switch CloseStatus(err) { + case StatusNormalClosure, StatusGoingAway: + c.eofed = true + return 0, io.EOF + } + return 0, err + } + if typ != c.msgType { + err := fmt.Errorf("unexpected frame type read (expected %v): %v", c.msgType, typ) + c.c.Close(StatusUnsupportedData, err.Error()) + return 0, err + } + c.reader = r + } + + n, err := c.reader.Read(p) + if err == io.EOF { + c.reader = nil + err = nil + } + return n, err +} + +type websocketAddr struct { +} + +func (a websocketAddr) Network() string { + return "websocket" +} + +func (a websocketAddr) String() string { + return "websocket/unknown-addr" +} + +func (c *netConn) RemoteAddr() net.Addr { + return websocketAddr{} +} + +func (c *netConn) LocalAddr() net.Addr { + return websocketAddr{} +} + +func (c *netConn) SetDeadline(t time.Time) error { + c.SetWriteDeadline(t) + c.SetReadDeadline(t) + return nil +} + +func (c *netConn) SetWriteDeadline(t time.Time) error { + if t.IsZero() { + c.writeTimer.Stop() + } else { + c.writeTimer.Reset(t.Sub(time.Now())) + } + return nil +} + +func (c *netConn) SetReadDeadline(t time.Time) error { + if t.IsZero() { + c.readTimer.Stop() + } 
else { + c.readTimer.Reset(t.Sub(time.Now())) + } + return nil +} diff --git a/vendor/nhooyr.io/websocket/read.go b/vendor/nhooyr.io/websocket/read.go index ae05cf93ed..bdfdfe6ab7 100644 --- a/vendor/nhooyr.io/websocket/read.go +++ b/vendor/nhooyr.io/websocket/read.go @@ -1,474 +1,474 @@ -// +build !js - -package websocket - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "strings" - "time" - - "nhooyr.io/websocket/internal/errd" - "nhooyr.io/websocket/internal/xsync" -) - -// Reader reads from the connection until until there is a WebSocket -// data message to be read. It will handle ping, pong and close frames as appropriate. -// -// It returns the type of the message and an io.Reader to read it. -// The passed context will also bound the reader. -// Ensure you read to EOF otherwise the connection will hang. -// -// Call CloseRead if you do not expect any data messages from the peer. -// -// Only one Reader may be open at a time. -func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) { - return c.reader(ctx) -} - -// Read is a convenience method around Reader to read a single message -// from the connection. -func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) { - typ, r, err := c.Reader(ctx) - if err != nil { - return 0, nil, err - } - - b, err := ioutil.ReadAll(r) - return typ, b, err -} - -// CloseRead starts a goroutine to read from the connection until it is closed -// or a data message is received. -// -// Once CloseRead is called you cannot read any messages from the connection. -// The returned context will be cancelled when the connection is closed. -// -// If a data message is received, the connection will be closed with StatusPolicyViolation. -// -// Call CloseRead when you do not expect to read any more messages. -// Since it actively reads from the connection, it will ensure that ping, pong and close -// frames are responded to. This means c.Ping and c.Close will still work as expected. -func (c *Conn) CloseRead(ctx context.Context) context.Context { - ctx, cancel := context.WithCancel(ctx) - go func() { - defer cancel() - c.Reader(ctx) - c.Close(StatusPolicyViolation, "unexpected data message") - }() - return ctx -} - -// SetReadLimit sets the max number of bytes to read for a single message. -// It applies to the Reader and Read methods. -// -// By default, the connection has a message read limit of 32768 bytes. -// -// When the limit is hit, the connection will be closed with StatusMessageTooBig. -func (c *Conn) SetReadLimit(n int64) { - // We add read one more byte than the limit in case - // there is a fin frame that needs to be read. 
- c.msgReader.limitReader.limit.Store(n + 1) -} - -const defaultReadLimit = 32768 - -func newMsgReader(c *Conn) *msgReader { - mr := &msgReader{ - c: c, - fin: true, - } - mr.readFunc = mr.read - - mr.limitReader = newLimitReader(c, mr.readFunc, defaultReadLimit+1) - return mr -} - -func (mr *msgReader) resetFlate() { - if mr.flateContextTakeover() { - mr.dict.init(32768) - } - if mr.flateBufio == nil { - mr.flateBufio = getBufioReader(mr.readFunc) - } - - mr.flateReader = getFlateReader(mr.flateBufio, mr.dict.buf) - mr.limitReader.r = mr.flateReader - mr.flateTail.Reset(deflateMessageTail) -} - -func (mr *msgReader) putFlateReader() { - if mr.flateReader != nil { - putFlateReader(mr.flateReader) - mr.flateReader = nil - } -} - -func (mr *msgReader) close() { - mr.c.readMu.forceLock() - mr.putFlateReader() - mr.dict.close() - if mr.flateBufio != nil { - putBufioReader(mr.flateBufio) - } - - if mr.c.client { - putBufioReader(mr.c.br) - mr.c.br = nil - } -} - -func (mr *msgReader) flateContextTakeover() bool { - if mr.c.client { - return !mr.c.copts.serverNoContextTakeover - } - return !mr.c.copts.clientNoContextTakeover -} - -func (c *Conn) readRSV1Illegal(h header) bool { - // If compression is disabled, rsv1 is illegal. - if !c.flate() { - return true - } - // rsv1 is only allowed on data frames beginning messages. - if h.opcode != opText && h.opcode != opBinary { - return true - } - return false -} - -func (c *Conn) readLoop(ctx context.Context) (header, error) { - for { - h, err := c.readFrameHeader(ctx) - if err != nil { - return header{}, err - } - - if h.rsv1 && c.readRSV1Illegal(h) || h.rsv2 || h.rsv3 { - err := fmt.Errorf("received header with unexpected rsv bits set: %v:%v:%v", h.rsv1, h.rsv2, h.rsv3) - c.writeError(StatusProtocolError, err) - return header{}, err - } - - if !c.client && !h.masked { - return header{}, errors.New("received unmasked frame from client") - } - - switch h.opcode { - case opClose, opPing, opPong: - err = c.handleControl(ctx, h) - if err != nil { - // Pass through CloseErrors when receiving a close frame. 
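// Editor's aside: the CloseStatus test just below is also the idiomatic
// caller-side check for a peer's close frame. A hedged sketch (names and
// imports — context, log, websocket — are the editor's):
func readUntilClosed(ctx context.Context, c *websocket.Conn) error {
	for {
		_, msg, err := c.Read(ctx)
		if websocket.CloseStatus(err) == websocket.StatusNormalClosure {
			return nil // peer closed cleanly
		}
		if err != nil {
			return err
		}
		log.Printf("message: %q", msg)
	}
}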
- if h.opcode == opClose && CloseStatus(err) != -1 { - return header{}, err - } - return header{}, fmt.Errorf("failed to handle control frame %v: %w", h.opcode, err) - } - case opContinuation, opText, opBinary: - return h, nil - default: - err := fmt.Errorf("received unknown opcode %v", h.opcode) - c.writeError(StatusProtocolError, err) - return header{}, err - } - } -} - -func (c *Conn) readFrameHeader(ctx context.Context) (header, error) { - select { - case <-c.closed: - return header{}, c.closeErr - case c.readTimeout <- ctx: - } - - h, err := readFrameHeader(c.br, c.readHeaderBuf[:]) - if err != nil { - select { - case <-c.closed: - return header{}, c.closeErr - case <-ctx.Done(): - return header{}, ctx.Err() - default: - c.close(err) - return header{}, err - } - } - - select { - case <-c.closed: - return header{}, c.closeErr - case c.readTimeout <- context.Background(): - } - - return h, nil -} - -func (c *Conn) readFramePayload(ctx context.Context, p []byte) (int, error) { - select { - case <-c.closed: - return 0, c.closeErr - case c.readTimeout <- ctx: - } - - n, err := io.ReadFull(c.br, p) - if err != nil { - select { - case <-c.closed: - return n, c.closeErr - case <-ctx.Done(): - return n, ctx.Err() - default: - err = fmt.Errorf("failed to read frame payload: %w", err) - c.close(err) - return n, err - } - } - - select { - case <-c.closed: - return n, c.closeErr - case c.readTimeout <- context.Background(): - } - - return n, err -} - -func (c *Conn) handleControl(ctx context.Context, h header) (err error) { - if h.payloadLength < 0 || h.payloadLength > maxControlPayload { - err := fmt.Errorf("received control frame payload with invalid length: %d", h.payloadLength) - c.writeError(StatusProtocolError, err) - return err - } - - if !h.fin { - err := errors.New("received fragmented control frame") - c.writeError(StatusProtocolError, err) - return err - } - - ctx, cancel := context.WithTimeout(ctx, time.Second*5) - defer cancel() - - b := c.readControlBuf[:h.payloadLength] - _, err = c.readFramePayload(ctx, b) - if err != nil { - return err - } - - if h.masked { - mask(h.maskKey, b) - } - - switch h.opcode { - case opPing: - return c.writeControl(ctx, opPong, b) - case opPong: - c.activePingsMu.Lock() - pong, ok := c.activePings[string(b)] - c.activePingsMu.Unlock() - if ok { - select { - case pong <- struct{}{}: - default: - } - } - return nil - } - - defer func() { - c.readCloseFrameErr = err - }() - - ce, err := parseClosePayload(b) - if err != nil { - err = fmt.Errorf("received invalid close payload: %w", err) - c.writeError(StatusProtocolError, err) - return err - } - - err = fmt.Errorf("received close frame: %w", ce) - c.setCloseErr(err) - c.writeClose(ce.Code, ce.Reason) - c.close(err) - return err -} - -func (c *Conn) reader(ctx context.Context) (_ MessageType, _ io.Reader, err error) { - defer errd.Wrap(&err, "failed to get reader") - - err = c.readMu.lock(ctx) - if err != nil { - return 0, nil, err - } - defer c.readMu.unlock() - - if !c.msgReader.fin { - err = errors.New("previous message not read to completion") - c.close(fmt.Errorf("failed to get reader: %w", err)) - return 0, nil, err - } - - h, err := c.readLoop(ctx) - if err != nil { - return 0, nil, err - } - - if h.opcode == opContinuation { - err := errors.New("received continuation frame without text or binary frame") - c.writeError(StatusProtocolError, err) - return 0, nil, err - } - - c.msgReader.reset(ctx, h) - - return MessageType(h.opcode), c.msgReader, nil -} - -type msgReader struct { - c *Conn - - ctx 
context.Context - flate bool - flateReader io.Reader - flateBufio *bufio.Reader - flateTail strings.Reader - limitReader *limitReader - dict slidingWindow - - fin bool - payloadLength int64 - maskKey uint32 - - // readerFunc(mr.Read) to avoid continuous allocations. - readFunc readerFunc -} - -func (mr *msgReader) reset(ctx context.Context, h header) { - mr.ctx = ctx - mr.flate = h.rsv1 - mr.limitReader.reset(mr.readFunc) - - if mr.flate { - mr.resetFlate() - } - - mr.setFrame(h) -} - -func (mr *msgReader) setFrame(h header) { - mr.fin = h.fin - mr.payloadLength = h.payloadLength - mr.maskKey = h.maskKey -} - -func (mr *msgReader) Read(p []byte) (n int, err error) { - err = mr.c.readMu.lock(mr.ctx) - if err != nil { - return 0, fmt.Errorf("failed to read: %w", err) - } - defer mr.c.readMu.unlock() - - n, err = mr.limitReader.Read(p) - if mr.flate && mr.flateContextTakeover() { - p = p[:n] - mr.dict.write(p) - } - if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) && mr.fin && mr.flate { - mr.putFlateReader() - return n, io.EOF - } - if err != nil { - err = fmt.Errorf("failed to read: %w", err) - mr.c.close(err) - } - return n, err -} - -func (mr *msgReader) read(p []byte) (int, error) { - for { - if mr.payloadLength == 0 { - if mr.fin { - if mr.flate { - return mr.flateTail.Read(p) - } - return 0, io.EOF - } - - h, err := mr.c.readLoop(mr.ctx) - if err != nil { - return 0, err - } - if h.opcode != opContinuation { - err := errors.New("received new data message without finishing the previous message") - mr.c.writeError(StatusProtocolError, err) - return 0, err - } - mr.setFrame(h) - - continue - } - - if int64(len(p)) > mr.payloadLength { - p = p[:mr.payloadLength] - } - - n, err := mr.c.readFramePayload(mr.ctx, p) - if err != nil { - return n, err - } - - mr.payloadLength -= int64(n) - - if !mr.c.client { - mr.maskKey = mask(mr.maskKey, p) - } - - return n, nil - } -} - -type limitReader struct { - c *Conn - r io.Reader - limit xsync.Int64 - n int64 -} - -func newLimitReader(c *Conn, r io.Reader, limit int64) *limitReader { - lr := &limitReader{ - c: c, - } - lr.limit.Store(limit) - lr.reset(r) - return lr -} - -func (lr *limitReader) reset(r io.Reader) { - lr.n = lr.limit.Load() - lr.r = r -} - -func (lr *limitReader) Read(p []byte) (int, error) { - if lr.n <= 0 { - err := fmt.Errorf("read limited at %v bytes", lr.limit.Load()) - lr.c.writeError(StatusMessageTooBig, err) - return 0, err - } - - if int64(len(p)) > lr.n { - p = p[:lr.n] - } - n, err := lr.r.Read(p) - lr.n -= int64(n) - return n, err -} - -type readerFunc func(p []byte) (int, error) - -func (f readerFunc) Read(p []byte) (int, error) { - return f(p) -} +// +build !js + +package websocket + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "strings" + "time" + + "nhooyr.io/websocket/internal/errd" + "nhooyr.io/websocket/internal/xsync" +) + +// Reader reads from the connection until until there is a WebSocket +// data message to be read. It will handle ping, pong and close frames as appropriate. +// +// It returns the type of the message and an io.Reader to read it. +// The passed context will also bound the reader. +// Ensure you read to EOF otherwise the connection will hang. +// +// Call CloseRead if you do not expect any data messages from the peer. +// +// Only one Reader may be open at a time. 
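// Editor's aside: a hedged sketch tying Reader and SetReadLimit (above)
// together. Streaming through Reader avoids buffering whole messages
// while the per-message limit still applies. Sizes, names and the sink
// are illustrative (imports: context, io, websocket).
func streamMessages(ctx context.Context, c *websocket.Conn, sink io.Writer) error {
	c.SetReadLimit(1 << 20) // close with StatusMessageTooBig beyond 1 MiB
	for {
		_, r, err := c.Reader(ctx)
		if err != nil {
			return err
		}
		// Each reader must be drained to EOF before the next Reader call,
		// or the connection hangs (see the doc comment above).
		if _, err := io.Copy(sink, r); err != nil {
			return err
		}
	}
}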
+func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) { + return c.reader(ctx) +} + +// Read is a convenience method around Reader to read a single message +// from the connection. +func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) { + typ, r, err := c.Reader(ctx) + if err != nil { + return 0, nil, err + } + + b, err := ioutil.ReadAll(r) + return typ, b, err +} + +// CloseRead starts a goroutine to read from the connection until it is closed +// or a data message is received. +// +// Once CloseRead is called you cannot read any messages from the connection. +// The returned context will be cancelled when the connection is closed. +// +// If a data message is received, the connection will be closed with StatusPolicyViolation. +// +// Call CloseRead when you do not expect to read any more messages. +// Since it actively reads from the connection, it will ensure that ping, pong and close +// frames are responded to. This means c.Ping and c.Close will still work as expected. +func (c *Conn) CloseRead(ctx context.Context) context.Context { + ctx, cancel := context.WithCancel(ctx) + go func() { + defer cancel() + c.Reader(ctx) + c.Close(StatusPolicyViolation, "unexpected data message") + }() + return ctx +} + +// SetReadLimit sets the max number of bytes to read for a single message. +// It applies to the Reader and Read methods. +// +// By default, the connection has a message read limit of 32768 bytes. +// +// When the limit is hit, the connection will be closed with StatusMessageTooBig. +func (c *Conn) SetReadLimit(n int64) { + // We add read one more byte than the limit in case + // there is a fin frame that needs to be read. + c.msgReader.limitReader.limit.Store(n + 1) +} + +const defaultReadLimit = 32768 + +func newMsgReader(c *Conn) *msgReader { + mr := &msgReader{ + c: c, + fin: true, + } + mr.readFunc = mr.read + + mr.limitReader = newLimitReader(c, mr.readFunc, defaultReadLimit+1) + return mr +} + +func (mr *msgReader) resetFlate() { + if mr.flateContextTakeover() { + mr.dict.init(32768) + } + if mr.flateBufio == nil { + mr.flateBufio = getBufioReader(mr.readFunc) + } + + mr.flateReader = getFlateReader(mr.flateBufio, mr.dict.buf) + mr.limitReader.r = mr.flateReader + mr.flateTail.Reset(deflateMessageTail) +} + +func (mr *msgReader) putFlateReader() { + if mr.flateReader != nil { + putFlateReader(mr.flateReader) + mr.flateReader = nil + } +} + +func (mr *msgReader) close() { + mr.c.readMu.forceLock() + mr.putFlateReader() + mr.dict.close() + if mr.flateBufio != nil { + putBufioReader(mr.flateBufio) + } + + if mr.c.client { + putBufioReader(mr.c.br) + mr.c.br = nil + } +} + +func (mr *msgReader) flateContextTakeover() bool { + if mr.c.client { + return !mr.c.copts.serverNoContextTakeover + } + return !mr.c.copts.clientNoContextTakeover +} + +func (c *Conn) readRSV1Illegal(h header) bool { + // If compression is disabled, rsv1 is illegal. + if !c.flate() { + return true + } + // rsv1 is only allowed on data frames beginning messages. 
+ if h.opcode != opText && h.opcode != opBinary { + return true + } + return false +} + +func (c *Conn) readLoop(ctx context.Context) (header, error) { + for { + h, err := c.readFrameHeader(ctx) + if err != nil { + return header{}, err + } + + if h.rsv1 && c.readRSV1Illegal(h) || h.rsv2 || h.rsv3 { + err := fmt.Errorf("received header with unexpected rsv bits set: %v:%v:%v", h.rsv1, h.rsv2, h.rsv3) + c.writeError(StatusProtocolError, err) + return header{}, err + } + + if !c.client && !h.masked { + return header{}, errors.New("received unmasked frame from client") + } + + switch h.opcode { + case opClose, opPing, opPong: + err = c.handleControl(ctx, h) + if err != nil { + // Pass through CloseErrors when receiving a close frame. + if h.opcode == opClose && CloseStatus(err) != -1 { + return header{}, err + } + return header{}, fmt.Errorf("failed to handle control frame %v: %w", h.opcode, err) + } + case opContinuation, opText, opBinary: + return h, nil + default: + err := fmt.Errorf("received unknown opcode %v", h.opcode) + c.writeError(StatusProtocolError, err) + return header{}, err + } + } +} + +func (c *Conn) readFrameHeader(ctx context.Context) (header, error) { + select { + case <-c.closed: + return header{}, c.closeErr + case c.readTimeout <- ctx: + } + + h, err := readFrameHeader(c.br, c.readHeaderBuf[:]) + if err != nil { + select { + case <-c.closed: + return header{}, c.closeErr + case <-ctx.Done(): + return header{}, ctx.Err() + default: + c.close(err) + return header{}, err + } + } + + select { + case <-c.closed: + return header{}, c.closeErr + case c.readTimeout <- context.Background(): + } + + return h, nil +} + +func (c *Conn) readFramePayload(ctx context.Context, p []byte) (int, error) { + select { + case <-c.closed: + return 0, c.closeErr + case c.readTimeout <- ctx: + } + + n, err := io.ReadFull(c.br, p) + if err != nil { + select { + case <-c.closed: + return n, c.closeErr + case <-ctx.Done(): + return n, ctx.Err() + default: + err = fmt.Errorf("failed to read frame payload: %w", err) + c.close(err) + return n, err + } + } + + select { + case <-c.closed: + return n, c.closeErr + case c.readTimeout <- context.Background(): + } + + return n, err +} + +func (c *Conn) handleControl(ctx context.Context, h header) (err error) { + if h.payloadLength < 0 || h.payloadLength > maxControlPayload { + err := fmt.Errorf("received control frame payload with invalid length: %d", h.payloadLength) + c.writeError(StatusProtocolError, err) + return err + } + + if !h.fin { + err := errors.New("received fragmented control frame") + c.writeError(StatusProtocolError, err) + return err + } + + ctx, cancel := context.WithTimeout(ctx, time.Second*5) + defer cancel() + + b := c.readControlBuf[:h.payloadLength] + _, err = c.readFramePayload(ctx, b) + if err != nil { + return err + } + + if h.masked { + mask(h.maskKey, b) + } + + switch h.opcode { + case opPing: + return c.writeControl(ctx, opPong, b) + case opPong: + c.activePingsMu.Lock() + pong, ok := c.activePings[string(b)] + c.activePingsMu.Unlock() + if ok { + select { + case pong <- struct{}{}: + default: + } + } + return nil + } + + defer func() { + c.readCloseFrameErr = err + }() + + ce, err := parseClosePayload(b) + if err != nil { + err = fmt.Errorf("received invalid close payload: %w", err) + c.writeError(StatusProtocolError, err) + return err + } + + err = fmt.Errorf("received close frame: %w", ce) + c.setCloseErr(err) + c.writeClose(ce.Code, ce.Reason) + c.close(err) + return err +} + +func (c *Conn) reader(ctx context.Context) (_ 
MessageType, _ io.Reader, err error) { + defer errd.Wrap(&err, "failed to get reader") + + err = c.readMu.lock(ctx) + if err != nil { + return 0, nil, err + } + defer c.readMu.unlock() + + if !c.msgReader.fin { + err = errors.New("previous message not read to completion") + c.close(fmt.Errorf("failed to get reader: %w", err)) + return 0, nil, err + } + + h, err := c.readLoop(ctx) + if err != nil { + return 0, nil, err + } + + if h.opcode == opContinuation { + err := errors.New("received continuation frame without text or binary frame") + c.writeError(StatusProtocolError, err) + return 0, nil, err + } + + c.msgReader.reset(ctx, h) + + return MessageType(h.opcode), c.msgReader, nil +} + +type msgReader struct { + c *Conn + + ctx context.Context + flate bool + flateReader io.Reader + flateBufio *bufio.Reader + flateTail strings.Reader + limitReader *limitReader + dict slidingWindow + + fin bool + payloadLength int64 + maskKey uint32 + + // readerFunc(mr.Read) to avoid continuous allocations. + readFunc readerFunc +} + +func (mr *msgReader) reset(ctx context.Context, h header) { + mr.ctx = ctx + mr.flate = h.rsv1 + mr.limitReader.reset(mr.readFunc) + + if mr.flate { + mr.resetFlate() + } + + mr.setFrame(h) +} + +func (mr *msgReader) setFrame(h header) { + mr.fin = h.fin + mr.payloadLength = h.payloadLength + mr.maskKey = h.maskKey +} + +func (mr *msgReader) Read(p []byte) (n int, err error) { + err = mr.c.readMu.lock(mr.ctx) + if err != nil { + return 0, fmt.Errorf("failed to read: %w", err) + } + defer mr.c.readMu.unlock() + + n, err = mr.limitReader.Read(p) + if mr.flate && mr.flateContextTakeover() { + p = p[:n] + mr.dict.write(p) + } + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) && mr.fin && mr.flate { + mr.putFlateReader() + return n, io.EOF + } + if err != nil { + err = fmt.Errorf("failed to read: %w", err) + mr.c.close(err) + } + return n, err +} + +func (mr *msgReader) read(p []byte) (int, error) { + for { + if mr.payloadLength == 0 { + if mr.fin { + if mr.flate { + return mr.flateTail.Read(p) + } + return 0, io.EOF + } + + h, err := mr.c.readLoop(mr.ctx) + if err != nil { + return 0, err + } + if h.opcode != opContinuation { + err := errors.New("received new data message without finishing the previous message") + mr.c.writeError(StatusProtocolError, err) + return 0, err + } + mr.setFrame(h) + + continue + } + + if int64(len(p)) > mr.payloadLength { + p = p[:mr.payloadLength] + } + + n, err := mr.c.readFramePayload(mr.ctx, p) + if err != nil { + return n, err + } + + mr.payloadLength -= int64(n) + + if !mr.c.client { + mr.maskKey = mask(mr.maskKey, p) + } + + return n, nil + } +} + +type limitReader struct { + c *Conn + r io.Reader + limit xsync.Int64 + n int64 +} + +func newLimitReader(c *Conn, r io.Reader, limit int64) *limitReader { + lr := &limitReader{ + c: c, + } + lr.limit.Store(limit) + lr.reset(r) + return lr +} + +func (lr *limitReader) reset(r io.Reader) { + lr.n = lr.limit.Load() + lr.r = r +} + +func (lr *limitReader) Read(p []byte) (int, error) { + if lr.n <= 0 { + err := fmt.Errorf("read limited at %v bytes", lr.limit.Load()) + lr.c.writeError(StatusMessageTooBig, err) + return 0, err + } + + if int64(len(p)) > lr.n { + p = p[:lr.n] + } + n, err := lr.r.Read(p) + lr.n -= int64(n) + return n, err +} + +type readerFunc func(p []byte) (int, error) + +func (f readerFunc) Read(p []byte) (int, error) { + return f(p) +} diff --git a/vendor/nhooyr.io/websocket/stringer.go b/vendor/nhooyr.io/websocket/stringer.go index 5a66ba2907..d246fd37b7 100644 --- 
a/vendor/nhooyr.io/websocket/stringer.go +++ b/vendor/nhooyr.io/websocket/stringer.go @@ -1,91 +1,91 @@ -// Code generated by "stringer -type=opcode,MessageType,StatusCode -output=stringer.go"; DO NOT EDIT. - -package websocket - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[opContinuation-0] - _ = x[opText-1] - _ = x[opBinary-2] - _ = x[opClose-8] - _ = x[opPing-9] - _ = x[opPong-10] -} - -const ( - _opcode_name_0 = "opContinuationopTextopBinary" - _opcode_name_1 = "opCloseopPingopPong" -) - -var ( - _opcode_index_0 = [...]uint8{0, 14, 20, 28} - _opcode_index_1 = [...]uint8{0, 7, 13, 19} -) - -func (i opcode) String() string { - switch { - case 0 <= i && i <= 2: - return _opcode_name_0[_opcode_index_0[i]:_opcode_index_0[i+1]] - case 8 <= i && i <= 10: - i -= 8 - return _opcode_name_1[_opcode_index_1[i]:_opcode_index_1[i+1]] - default: - return "opcode(" + strconv.FormatInt(int64(i), 10) + ")" - } -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[MessageText-1] - _ = x[MessageBinary-2] -} - -const _MessageType_name = "MessageTextMessageBinary" - -var _MessageType_index = [...]uint8{0, 11, 24} - -func (i MessageType) String() string { - i -= 1 - if i < 0 || i >= MessageType(len(_MessageType_index)-1) { - return "MessageType(" + strconv.FormatInt(int64(i+1), 10) + ")" - } - return _MessageType_name[_MessageType_index[i]:_MessageType_index[i+1]] -} -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[StatusNormalClosure-1000] - _ = x[StatusGoingAway-1001] - _ = x[StatusProtocolError-1002] - _ = x[StatusUnsupportedData-1003] - _ = x[statusReserved-1004] - _ = x[StatusNoStatusRcvd-1005] - _ = x[StatusAbnormalClosure-1006] - _ = x[StatusInvalidFramePayloadData-1007] - _ = x[StatusPolicyViolation-1008] - _ = x[StatusMessageTooBig-1009] - _ = x[StatusMandatoryExtension-1010] - _ = x[StatusInternalError-1011] - _ = x[StatusServiceRestart-1012] - _ = x[StatusTryAgainLater-1013] - _ = x[StatusBadGateway-1014] - _ = x[StatusTLSHandshake-1015] -} - -const _StatusCode_name = "StatusNormalClosureStatusGoingAwayStatusProtocolErrorStatusUnsupportedDatastatusReservedStatusNoStatusRcvdStatusAbnormalClosureStatusInvalidFramePayloadDataStatusPolicyViolationStatusMessageTooBigStatusMandatoryExtensionStatusInternalErrorStatusServiceRestartStatusTryAgainLaterStatusBadGatewayStatusTLSHandshake" - -var _StatusCode_index = [...]uint16{0, 19, 34, 53, 74, 88, 106, 127, 156, 177, 196, 220, 239, 259, 278, 294, 312} - -func (i StatusCode) String() string { - i -= 1000 - if i < 0 || i >= StatusCode(len(_StatusCode_index)-1) { - return "StatusCode(" + strconv.FormatInt(int64(i+1000), 10) + ")" - } - return _StatusCode_name[_StatusCode_index[i]:_StatusCode_index[i+1]] -} +// Code generated by "stringer -type=opcode,MessageType,StatusCode -output=stringer.go"; DO NOT EDIT. + +package websocket + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[opContinuation-0] + _ = x[opText-1] + _ = x[opBinary-2] + _ = x[opClose-8] + _ = x[opPing-9] + _ = x[opPong-10] +} + +const ( + _opcode_name_0 = "opContinuationopTextopBinary" + _opcode_name_1 = "opCloseopPingopPong" +) + +var ( + _opcode_index_0 = [...]uint8{0, 14, 20, 28} + _opcode_index_1 = [...]uint8{0, 7, 13, 19} +) + +func (i opcode) String() string { + switch { + case 0 <= i && i <= 2: + return _opcode_name_0[_opcode_index_0[i]:_opcode_index_0[i+1]] + case 8 <= i && i <= 10: + i -= 8 + return _opcode_name_1[_opcode_index_1[i]:_opcode_index_1[i+1]] + default: + return "opcode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[MessageText-1] + _ = x[MessageBinary-2] +} + +const _MessageType_name = "MessageTextMessageBinary" + +var _MessageType_index = [...]uint8{0, 11, 24} + +func (i MessageType) String() string { + i -= 1 + if i < 0 || i >= MessageType(len(_MessageType_index)-1) { + return "MessageType(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _MessageType_name[_MessageType_index[i]:_MessageType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[StatusNormalClosure-1000] + _ = x[StatusGoingAway-1001] + _ = x[StatusProtocolError-1002] + _ = x[StatusUnsupportedData-1003] + _ = x[statusReserved-1004] + _ = x[StatusNoStatusRcvd-1005] + _ = x[StatusAbnormalClosure-1006] + _ = x[StatusInvalidFramePayloadData-1007] + _ = x[StatusPolicyViolation-1008] + _ = x[StatusMessageTooBig-1009] + _ = x[StatusMandatoryExtension-1010] + _ = x[StatusInternalError-1011] + _ = x[StatusServiceRestart-1012] + _ = x[StatusTryAgainLater-1013] + _ = x[StatusBadGateway-1014] + _ = x[StatusTLSHandshake-1015] +} + +const _StatusCode_name = "StatusNormalClosureStatusGoingAwayStatusProtocolErrorStatusUnsupportedDatastatusReservedStatusNoStatusRcvdStatusAbnormalClosureStatusInvalidFramePayloadDataStatusPolicyViolationStatusMessageTooBigStatusMandatoryExtensionStatusInternalErrorStatusServiceRestartStatusTryAgainLaterStatusBadGatewayStatusTLSHandshake" + +var _StatusCode_index = [...]uint16{0, 19, 34, 53, 74, 88, 106, 127, 156, 177, 196, 220, 239, 259, 278, 294, 312} + +func (i StatusCode) String() string { + i -= 1000 + if i < 0 || i >= StatusCode(len(_StatusCode_index)-1) { + return "StatusCode(" + strconv.FormatInt(int64(i+1000), 10) + ")" + } + return _StatusCode_name[_StatusCode_index[i]:_StatusCode_index[i+1]] +} diff --git a/vendor/nhooyr.io/websocket/write.go b/vendor/nhooyr.io/websocket/write.go index 2210cf817a..f973c7c320 100644 --- a/vendor/nhooyr.io/websocket/write.go +++ b/vendor/nhooyr.io/websocket/write.go @@ -1,397 +1,397 @@ -// +build !js - -package websocket - -import ( - "bufio" - "context" - "crypto/rand" - "encoding/binary" - "errors" - "fmt" - "io" - "time" - - "github.com/klauspost/compress/flate" - - "nhooyr.io/websocket/internal/errd" -) - -// Writer returns a writer bounded by the context that will write -// a WebSocket message of type dataType to the connection. -// -// You must close the writer once you have written the entire message. -// -// Only one writer can be open at a time, multiple calls will block until the previous writer -// is closed. 
-func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) { - w, err := c.writer(ctx, typ) - if err != nil { - return nil, fmt.Errorf("failed to get writer: %w", err) - } - return w, nil -} - -// Write writes a message to the connection. -// -// See the Writer method if you want to stream a message. -// -// If compression is disabled or the threshold is not met, then it -// will write the message in a single frame. -func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error { - _, err := c.write(ctx, typ, p) - if err != nil { - return fmt.Errorf("failed to write msg: %w", err) - } - return nil -} - -type msgWriter struct { - mw *msgWriterState - closed bool -} - -func (mw *msgWriter) Write(p []byte) (int, error) { - if mw.closed { - return 0, errors.New("cannot use closed writer") - } - return mw.mw.Write(p) -} - -func (mw *msgWriter) Close() error { - if mw.closed { - return errors.New("cannot use closed writer") - } - mw.closed = true - return mw.mw.Close() -} - -type msgWriterState struct { - c *Conn - - mu *mu - writeMu *mu - - ctx context.Context - opcode opcode - flate bool - - trimWriter *trimLastFourBytesWriter - dict slidingWindow -} - -func newMsgWriterState(c *Conn) *msgWriterState { - mw := &msgWriterState{ - c: c, - mu: newMu(c), - writeMu: newMu(c), - } - return mw -} - -func (mw *msgWriterState) ensureFlate() { - if mw.trimWriter == nil { - mw.trimWriter = &trimLastFourBytesWriter{ - w: writerFunc(mw.write), - } - } - - mw.dict.init(8192) - mw.flate = true -} - -func (mw *msgWriterState) flateContextTakeover() bool { - if mw.c.client { - return !mw.c.copts.clientNoContextTakeover - } - return !mw.c.copts.serverNoContextTakeover -} - -func (c *Conn) writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) { - err := c.msgWriterState.reset(ctx, typ) - if err != nil { - return nil, err - } - return &msgWriter{ - mw: c.msgWriterState, - closed: false, - }, nil -} - -func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) (int, error) { - mw, err := c.writer(ctx, typ) - if err != nil { - return 0, err - } - - if !c.flate() { - defer c.msgWriterState.mu.unlock() - return c.writeFrame(ctx, true, false, c.msgWriterState.opcode, p) - } - - n, err := mw.Write(p) - if err != nil { - return n, err - } - - err = mw.Close() - return n, err -} - -func (mw *msgWriterState) reset(ctx context.Context, typ MessageType) error { - err := mw.mu.lock(ctx) - if err != nil { - return err - } - - mw.ctx = ctx - mw.opcode = opcode(typ) - mw.flate = false - - mw.trimWriter.reset() - - return nil -} - -// Write writes the given bytes to the WebSocket connection. 
-func (mw *msgWriterState) Write(p []byte) (_ int, err error) { - err = mw.writeMu.lock(mw.ctx) - if err != nil { - return 0, fmt.Errorf("failed to write: %w", err) - } - defer mw.writeMu.unlock() - - defer func() { - if err != nil { - err = fmt.Errorf("failed to write: %w", err) - mw.c.close(err) - } - }() - - if mw.c.flate() { - // Only enables flate if the length crosses the - // threshold on the first frame - if mw.opcode != opContinuation && len(p) >= mw.c.flateThreshold { - mw.ensureFlate() - } - } - - if mw.flate { - err = flate.StatelessDeflate(mw.trimWriter, p, false, mw.dict.buf) - if err != nil { - return 0, err - } - mw.dict.write(p) - return len(p), nil - } - - return mw.write(p) -} - -func (mw *msgWriterState) write(p []byte) (int, error) { - n, err := mw.c.writeFrame(mw.ctx, false, mw.flate, mw.opcode, p) - if err != nil { - return n, fmt.Errorf("failed to write data frame: %w", err) - } - mw.opcode = opContinuation - return n, nil -} - -// Close flushes the frame to the connection. -func (mw *msgWriterState) Close() (err error) { - defer errd.Wrap(&err, "failed to close writer") - - err = mw.writeMu.lock(mw.ctx) - if err != nil { - return err - } - defer mw.writeMu.unlock() - - _, err = mw.c.writeFrame(mw.ctx, true, mw.flate, mw.opcode, nil) - if err != nil { - return fmt.Errorf("failed to write fin frame: %w", err) - } - - if mw.flate && !mw.flateContextTakeover() { - mw.dict.close() - } - mw.mu.unlock() - return nil -} - -func (mw *msgWriterState) close() { - if mw.c.client { - mw.c.writeFrameMu.forceLock() - putBufioWriter(mw.c.bw) - } - - mw.writeMu.forceLock() - mw.dict.close() -} - -func (c *Conn) writeControl(ctx context.Context, opcode opcode, p []byte) error { - ctx, cancel := context.WithTimeout(ctx, time.Second*5) - defer cancel() - - _, err := c.writeFrame(ctx, true, false, opcode, p) - if err != nil { - return fmt.Errorf("failed to write control frame %v: %w", opcode, err) - } - return nil -} - -// frame handles all writes to the connection. -func (c *Conn) writeFrame(ctx context.Context, fin bool, flate bool, opcode opcode, p []byte) (_ int, err error) { - err = c.writeFrameMu.lock(ctx) - if err != nil { - return 0, err - } - defer c.writeFrameMu.unlock() - - // If the state says a close has already been written, we wait until - // the connection is closed and return that error. - // - // However, if the frame being written is a close, that means its the close from - // the state being set so we let it go through. 
- c.closeMu.Lock() - wroteClose := c.wroteClose - c.closeMu.Unlock() - if wroteClose && opcode != opClose { - select { - case <-ctx.Done(): - return 0, ctx.Err() - case <-c.closed: - return 0, c.closeErr - } - } - - select { - case <-c.closed: - return 0, c.closeErr - case c.writeTimeout <- ctx: - } - - defer func() { - if err != nil { - select { - case <-c.closed: - err = c.closeErr - case <-ctx.Done(): - err = ctx.Err() - } - c.close(err) - err = fmt.Errorf("failed to write frame: %w", err) - } - }() - - c.writeHeader.fin = fin - c.writeHeader.opcode = opcode - c.writeHeader.payloadLength = int64(len(p)) - - if c.client { - c.writeHeader.masked = true - _, err = io.ReadFull(rand.Reader, c.writeHeaderBuf[:4]) - if err != nil { - return 0, fmt.Errorf("failed to generate masking key: %w", err) - } - c.writeHeader.maskKey = binary.LittleEndian.Uint32(c.writeHeaderBuf[:]) - } - - c.writeHeader.rsv1 = false - if flate && (opcode == opText || opcode == opBinary) { - c.writeHeader.rsv1 = true - } - - err = writeFrameHeader(c.writeHeader, c.bw, c.writeHeaderBuf[:]) - if err != nil { - return 0, err - } - - n, err := c.writeFramePayload(p) - if err != nil { - return n, err - } - - if c.writeHeader.fin { - err = c.bw.Flush() - if err != nil { - return n, fmt.Errorf("failed to flush: %w", err) - } - } - - select { - case <-c.closed: - return n, c.closeErr - case c.writeTimeout <- context.Background(): - } - - return n, nil -} - -func (c *Conn) writeFramePayload(p []byte) (n int, err error) { - defer errd.Wrap(&err, "failed to write frame payload") - - if !c.writeHeader.masked { - return c.bw.Write(p) - } - - maskKey := c.writeHeader.maskKey - for len(p) > 0 { - // If the buffer is full, we need to flush. - if c.bw.Available() == 0 { - err = c.bw.Flush() - if err != nil { - return n, err - } - } - - // Start of next write in the buffer. - i := c.bw.Buffered() - - j := len(p) - if j > c.bw.Available() { - j = c.bw.Available() - } - - _, err := c.bw.Write(p[:j]) - if err != nil { - return n, err - } - - maskKey = mask(maskKey, c.writeBuf[i:c.bw.Buffered()]) - - p = p[j:] - n += j - } - - return n, nil -} - -type writerFunc func(p []byte) (int, error) - -func (f writerFunc) Write(p []byte) (int, error) { - return f(p) -} - -// extractBufioWriterBuf grabs the []byte backing a *bufio.Writer -// and returns it. -func extractBufioWriterBuf(bw *bufio.Writer, w io.Writer) []byte { - var writeBuf []byte - bw.Reset(writerFunc(func(p2 []byte) (int, error) { - writeBuf = p2[:cap(p2)] - return len(p2), nil - })) - - bw.WriteByte(0) - bw.Flush() - - bw.Reset(w) - - return writeBuf -} - -func (c *Conn) writeError(code StatusCode, err error) { - c.setCloseErr(err) - c.writeClose(code, err.Error()) - c.close(nil) -} +// +build !js + +package websocket + +import ( + "bufio" + "context" + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "time" + + "github.com/klauspost/compress/flate" + + "nhooyr.io/websocket/internal/errd" +) + +// Writer returns a writer bounded by the context that will write +// a WebSocket message of type dataType to the connection. +// +// You must close the writer once you have written the entire message. +// +// Only one writer can be open at a time, multiple calls will block until the previous writer +// is closed. +func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) { + w, err := c.writer(ctx, typ) + if err != nil { + return nil, fmt.Errorf("failed to get writer: %w", err) + } + return w, nil +} + +// Write writes a message to the connection. 
+// +// See the Writer method if you want to stream a message. +// +// If compression is disabled or the threshold is not met, then it +// will write the message in a single frame. +func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error { + _, err := c.write(ctx, typ, p) + if err != nil { + return fmt.Errorf("failed to write msg: %w", err) + } + return nil +} + +type msgWriter struct { + mw *msgWriterState + closed bool +} + +func (mw *msgWriter) Write(p []byte) (int, error) { + if mw.closed { + return 0, errors.New("cannot use closed writer") + } + return mw.mw.Write(p) +} + +func (mw *msgWriter) Close() error { + if mw.closed { + return errors.New("cannot use closed writer") + } + mw.closed = true + return mw.mw.Close() +} + +type msgWriterState struct { + c *Conn + + mu *mu + writeMu *mu + + ctx context.Context + opcode opcode + flate bool + + trimWriter *trimLastFourBytesWriter + dict slidingWindow +} + +func newMsgWriterState(c *Conn) *msgWriterState { + mw := &msgWriterState{ + c: c, + mu: newMu(c), + writeMu: newMu(c), + } + return mw +} + +func (mw *msgWriterState) ensureFlate() { + if mw.trimWriter == nil { + mw.trimWriter = &trimLastFourBytesWriter{ + w: writerFunc(mw.write), + } + } + + mw.dict.init(8192) + mw.flate = true +} + +func (mw *msgWriterState) flateContextTakeover() bool { + if mw.c.client { + return !mw.c.copts.clientNoContextTakeover + } + return !mw.c.copts.serverNoContextTakeover +} + +func (c *Conn) writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) { + err := c.msgWriterState.reset(ctx, typ) + if err != nil { + return nil, err + } + return &msgWriter{ + mw: c.msgWriterState, + closed: false, + }, nil +} + +func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) (int, error) { + mw, err := c.writer(ctx, typ) + if err != nil { + return 0, err + } + + if !c.flate() { + defer c.msgWriterState.mu.unlock() + return c.writeFrame(ctx, true, false, c.msgWriterState.opcode, p) + } + + n, err := mw.Write(p) + if err != nil { + return n, err + } + + err = mw.Close() + return n, err +} + +func (mw *msgWriterState) reset(ctx context.Context, typ MessageType) error { + err := mw.mu.lock(ctx) + if err != nil { + return err + } + + mw.ctx = ctx + mw.opcode = opcode(typ) + mw.flate = false + + mw.trimWriter.reset() + + return nil +} + +// Write writes the given bytes to the WebSocket connection. +func (mw *msgWriterState) Write(p []byte) (_ int, err error) { + err = mw.writeMu.lock(mw.ctx) + if err != nil { + return 0, fmt.Errorf("failed to write: %w", err) + } + defer mw.writeMu.unlock() + + defer func() { + if err != nil { + err = fmt.Errorf("failed to write: %w", err) + mw.c.close(err) + } + }() + + if mw.c.flate() { + // Only enables flate if the length crosses the + // threshold on the first frame + if mw.opcode != opContinuation && len(p) >= mw.c.flateThreshold { + mw.ensureFlate() + } + } + + if mw.flate { + err = flate.StatelessDeflate(mw.trimWriter, p, false, mw.dict.buf) + if err != nil { + return 0, err + } + mw.dict.write(p) + return len(p), nil + } + + return mw.write(p) +} + +func (mw *msgWriterState) write(p []byte) (int, error) { + n, err := mw.c.writeFrame(mw.ctx, false, mw.flate, mw.opcode, p) + if err != nil { + return n, fmt.Errorf("failed to write data frame: %w", err) + } + mw.opcode = opContinuation + return n, nil +} + +// Close flushes the frame to the connection. 
+func (mw *msgWriterState) Close() (err error) { + defer errd.Wrap(&err, "failed to close writer") + + err = mw.writeMu.lock(mw.ctx) + if err != nil { + return err + } + defer mw.writeMu.unlock() + + _, err = mw.c.writeFrame(mw.ctx, true, mw.flate, mw.opcode, nil) + if err != nil { + return fmt.Errorf("failed to write fin frame: %w", err) + } + + if mw.flate && !mw.flateContextTakeover() { + mw.dict.close() + } + mw.mu.unlock() + return nil +} + +func (mw *msgWriterState) close() { + if mw.c.client { + mw.c.writeFrameMu.forceLock() + putBufioWriter(mw.c.bw) + } + + mw.writeMu.forceLock() + mw.dict.close() +} + +func (c *Conn) writeControl(ctx context.Context, opcode opcode, p []byte) error { + ctx, cancel := context.WithTimeout(ctx, time.Second*5) + defer cancel() + + _, err := c.writeFrame(ctx, true, false, opcode, p) + if err != nil { + return fmt.Errorf("failed to write control frame %v: %w", opcode, err) + } + return nil +} + +// frame handles all writes to the connection. +func (c *Conn) writeFrame(ctx context.Context, fin bool, flate bool, opcode opcode, p []byte) (_ int, err error) { + err = c.writeFrameMu.lock(ctx) + if err != nil { + return 0, err + } + defer c.writeFrameMu.unlock() + + // If the state says a close has already been written, we wait until + // the connection is closed and return that error. + // + // However, if the frame being written is a close, that means its the close from + // the state being set so we let it go through. + c.closeMu.Lock() + wroteClose := c.wroteClose + c.closeMu.Unlock() + if wroteClose && opcode != opClose { + select { + case <-ctx.Done(): + return 0, ctx.Err() + case <-c.closed: + return 0, c.closeErr + } + } + + select { + case <-c.closed: + return 0, c.closeErr + case c.writeTimeout <- ctx: + } + + defer func() { + if err != nil { + select { + case <-c.closed: + err = c.closeErr + case <-ctx.Done(): + err = ctx.Err() + } + c.close(err) + err = fmt.Errorf("failed to write frame: %w", err) + } + }() + + c.writeHeader.fin = fin + c.writeHeader.opcode = opcode + c.writeHeader.payloadLength = int64(len(p)) + + if c.client { + c.writeHeader.masked = true + _, err = io.ReadFull(rand.Reader, c.writeHeaderBuf[:4]) + if err != nil { + return 0, fmt.Errorf("failed to generate masking key: %w", err) + } + c.writeHeader.maskKey = binary.LittleEndian.Uint32(c.writeHeaderBuf[:]) + } + + c.writeHeader.rsv1 = false + if flate && (opcode == opText || opcode == opBinary) { + c.writeHeader.rsv1 = true + } + + err = writeFrameHeader(c.writeHeader, c.bw, c.writeHeaderBuf[:]) + if err != nil { + return 0, err + } + + n, err := c.writeFramePayload(p) + if err != nil { + return n, err + } + + if c.writeHeader.fin { + err = c.bw.Flush() + if err != nil { + return n, fmt.Errorf("failed to flush: %w", err) + } + } + + select { + case <-c.closed: + return n, c.closeErr + case c.writeTimeout <- context.Background(): + } + + return n, nil +} + +func (c *Conn) writeFramePayload(p []byte) (n int, err error) { + defer errd.Wrap(&err, "failed to write frame payload") + + if !c.writeHeader.masked { + return c.bw.Write(p) + } + + maskKey := c.writeHeader.maskKey + for len(p) > 0 { + // If the buffer is full, we need to flush. + if c.bw.Available() == 0 { + err = c.bw.Flush() + if err != nil { + return n, err + } + } + + // Start of next write in the buffer. 
+ i := c.bw.Buffered() + + j := len(p) + if j > c.bw.Available() { + j = c.bw.Available() + } + + _, err := c.bw.Write(p[:j]) + if err != nil { + return n, err + } + + maskKey = mask(maskKey, c.writeBuf[i:c.bw.Buffered()]) + + p = p[j:] + n += j + } + + return n, nil +} + +type writerFunc func(p []byte) (int, error) + +func (f writerFunc) Write(p []byte) (int, error) { + return f(p) +} + +// extractBufioWriterBuf grabs the []byte backing a *bufio.Writer +// and returns it. +func extractBufioWriterBuf(bw *bufio.Writer, w io.Writer) []byte { + var writeBuf []byte + bw.Reset(writerFunc(func(p2 []byte) (int, error) { + writeBuf = p2[:cap(p2)] + return len(p2), nil + })) + + bw.WriteByte(0) + bw.Flush() + + bw.Reset(w) + + return writeBuf +} + +func (c *Conn) writeError(code StatusCode, err error) { + c.setCloseErr(err) + c.writeClose(code, err.Error()) + c.close(nil) +} diff --git a/vendor/nhooyr.io/websocket/ws_js.go b/vendor/nhooyr.io/websocket/ws_js.go index b87e32cdaf..c358544e82 100644 --- a/vendor/nhooyr.io/websocket/ws_js.go +++ b/vendor/nhooyr.io/websocket/ws_js.go @@ -1,379 +1,379 @@ -package websocket // import "nhooyr.io/websocket" - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "net/http" - "reflect" - "runtime" - "strings" - "sync" - "syscall/js" - - "nhooyr.io/websocket/internal/bpool" - "nhooyr.io/websocket/internal/wsjs" - "nhooyr.io/websocket/internal/xsync" -) - -// Conn provides a wrapper around the browser WebSocket API. -type Conn struct { - ws wsjs.WebSocket - - // read limit for a message in bytes. - msgReadLimit xsync.Int64 - - closingMu sync.Mutex - isReadClosed xsync.Int64 - closeOnce sync.Once - closed chan struct{} - closeErrOnce sync.Once - closeErr error - closeWasClean bool - - releaseOnClose func() - releaseOnMessage func() - - readSignal chan struct{} - readBufMu sync.Mutex - readBuf []wsjs.MessageEvent -} - -func (c *Conn) close(err error, wasClean bool) { - c.closeOnce.Do(func() { - runtime.SetFinalizer(c, nil) - - if !wasClean { - err = fmt.Errorf("unclean connection close: %w", err) - } - c.setCloseErr(err) - c.closeWasClean = wasClean - close(c.closed) - }) -} - -func (c *Conn) init() { - c.closed = make(chan struct{}) - c.readSignal = make(chan struct{}, 1) - - c.msgReadLimit.Store(32768) - - c.releaseOnClose = c.ws.OnClose(func(e wsjs.CloseEvent) { - err := CloseError{ - Code: StatusCode(e.Code), - Reason: e.Reason, - } - // We do not know if we sent or received this close as - // its possible the browser triggered it without us - // explicitly sending it. - c.close(err, e.WasClean) - - c.releaseOnClose() - c.releaseOnMessage() - }) - - c.releaseOnMessage = c.ws.OnMessage(func(e wsjs.MessageEvent) { - c.readBufMu.Lock() - defer c.readBufMu.Unlock() - - c.readBuf = append(c.readBuf, e) - - // Lets the read goroutine know there is definitely something in readBuf. - select { - case c.readSignal <- struct{}{}: - default: - } - }) - - runtime.SetFinalizer(c, func(c *Conn) { - c.setCloseErr(errors.New("connection garbage collected")) - c.closeWithInternal() - }) -} - -func (c *Conn) closeWithInternal() { - c.Close(StatusInternalError, "something went wrong") -} - -// Read attempts to read a message from the connection. -// The maximum time spent waiting is bounded by the context. 
-func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) { - if c.isReadClosed.Load() == 1 { - return 0, nil, errors.New("WebSocket connection read closed") - } - - typ, p, err := c.read(ctx) - if err != nil { - return 0, nil, fmt.Errorf("failed to read: %w", err) - } - if int64(len(p)) > c.msgReadLimit.Load() { - err := fmt.Errorf("read limited at %v bytes", c.msgReadLimit.Load()) - c.Close(StatusMessageTooBig, err.Error()) - return 0, nil, err - } - return typ, p, nil -} - -func (c *Conn) read(ctx context.Context) (MessageType, []byte, error) { - select { - case <-ctx.Done(): - c.Close(StatusPolicyViolation, "read timed out") - return 0, nil, ctx.Err() - case <-c.readSignal: - case <-c.closed: - return 0, nil, c.closeErr - } - - c.readBufMu.Lock() - defer c.readBufMu.Unlock() - - me := c.readBuf[0] - // We copy the messages forward and decrease the size - // of the slice to avoid reallocating. - copy(c.readBuf, c.readBuf[1:]) - c.readBuf = c.readBuf[:len(c.readBuf)-1] - - if len(c.readBuf) > 0 { - // Next time we read, we'll grab the message. - select { - case c.readSignal <- struct{}{}: - default: - } - } - - switch p := me.Data.(type) { - case string: - return MessageText, []byte(p), nil - case []byte: - return MessageBinary, p, nil - default: - panic("websocket: unexpected data type from wsjs OnMessage: " + reflect.TypeOf(me.Data).String()) - } -} - -// Ping is mocked out for Wasm. -func (c *Conn) Ping(ctx context.Context) error { - return nil -} - -// Write writes a message of the given type to the connection. -// Always non blocking. -func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error { - err := c.write(ctx, typ, p) - if err != nil { - // Have to ensure the WebSocket is closed after a write error - // to match the Go API. It can only error if the message type - // is unexpected or the passed bytes contain invalid UTF-8 for - // MessageText. - err := fmt.Errorf("failed to write: %w", err) - c.setCloseErr(err) - c.closeWithInternal() - return err - } - return nil -} - -func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) error { - if c.isClosed() { - return c.closeErr - } - switch typ { - case MessageBinary: - return c.ws.SendBytes(p) - case MessageText: - return c.ws.SendText(string(p)) - default: - return fmt.Errorf("unexpected message type: %v", typ) - } -} - -// Close closes the WebSocket with the given code and reason. -// It will wait until the peer responds with a close frame -// or the connection is closed. -// It thus performs the full WebSocket close handshake. -func (c *Conn) Close(code StatusCode, reason string) error { - err := c.exportedClose(code, reason) - if err != nil { - return fmt.Errorf("failed to close WebSocket: %w", err) - } - return nil -} - -func (c *Conn) exportedClose(code StatusCode, reason string) error { - c.closingMu.Lock() - defer c.closingMu.Unlock() - - ce := fmt.Errorf("sent close: %w", CloseError{ - Code: code, - Reason: reason, - }) - - if c.isClosed() { - return fmt.Errorf("tried to close with %q but connection already closed: %w", ce, c.closeErr) - } - - c.setCloseErr(ce) - err := c.ws.Close(int(code), reason) - if err != nil { - return err - } - - <-c.closed - if !c.closeWasClean { - return c.closeErr - } - return nil -} - -// Subprotocol returns the negotiated subprotocol. -// An empty string means the default protocol. -func (c *Conn) Subprotocol() string { - return c.ws.Subprotocol() -} - -// DialOptions represents the options available to pass to Dial. 
-type DialOptions struct { - // Subprotocols lists the subprotocols to negotiate with the server. - Subprotocols []string -} - -// Dial creates a new WebSocket connection to the given url with the given options. -// The passed context bounds the maximum time spent waiting for the connection to open. -// The returned *http.Response is always nil or a mock. It's only in the signature -// to match the core API. -func Dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) { - c, resp, err := dial(ctx, url, opts) - if err != nil { - return nil, nil, fmt.Errorf("failed to WebSocket dial %q: %w", url, err) - } - return c, resp, nil -} - -func dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) { - if opts == nil { - opts = &DialOptions{} - } - - url = strings.Replace(url, "http://", "ws://", 1) - url = strings.Replace(url, "https://", "wss://", 1) - - ws, err := wsjs.New(url, opts.Subprotocols) - if err != nil { - return nil, nil, err - } - - c := &Conn{ - ws: ws, - } - c.init() - - opench := make(chan struct{}) - releaseOpen := ws.OnOpen(func(e js.Value) { - close(opench) - }) - defer releaseOpen() - - select { - case <-ctx.Done(): - c.Close(StatusPolicyViolation, "dial timed out") - return nil, nil, ctx.Err() - case <-opench: - return c, &http.Response{ - StatusCode: http.StatusSwitchingProtocols, - }, nil - case <-c.closed: - return nil, nil, c.closeErr - } -} - -// Reader attempts to read a message from the connection. -// The maximum time spent waiting is bounded by the context. -func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) { - typ, p, err := c.Read(ctx) - if err != nil { - return 0, nil, err - } - return typ, bytes.NewReader(p), nil -} - -// Writer returns a writer to write a WebSocket data message to the connection. -// It buffers the entire message in memory and then sends it when the writer -// is closed. -func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) { - return writer{ - c: c, - ctx: ctx, - typ: typ, - b: bpool.Get(), - }, nil -} - -type writer struct { - closed bool - - c *Conn - ctx context.Context - typ MessageType - - b *bytes.Buffer -} - -func (w writer) Write(p []byte) (int, error) { - if w.closed { - return 0, errors.New("cannot write to closed writer") - } - n, err := w.b.Write(p) - if err != nil { - return n, fmt.Errorf("failed to write message: %w", err) - } - return n, nil -} - -func (w writer) Close() error { - if w.closed { - return errors.New("cannot close closed writer") - } - w.closed = true - defer bpool.Put(w.b) - - err := w.c.Write(w.ctx, w.typ, w.b.Bytes()) - if err != nil { - return fmt.Errorf("failed to close writer: %w", err) - } - return nil -} - -// CloseRead implements *Conn.CloseRead for wasm. -func (c *Conn) CloseRead(ctx context.Context) context.Context { - c.isReadClosed.Store(1) - - ctx, cancel := context.WithCancel(ctx) - go func() { - defer cancel() - c.read(ctx) - c.Close(StatusPolicyViolation, "unexpected data message") - }() - return ctx -} - -// SetReadLimit implements *Conn.SetReadLimit for wasm. 
-func (c *Conn) SetReadLimit(n int64) {
-	c.msgReadLimit.Store(n)
-}
-
-func (c *Conn) setCloseErr(err error) {
-	c.closeErrOnce.Do(func() {
-		c.closeErr = fmt.Errorf("WebSocket closed: %w", err)
-	})
-}
-
-func (c *Conn) isClosed() bool {
-	select {
-	case <-c.closed:
-		return true
-	default:
-		return false
-	}
-}
+package websocket // import "nhooyr.io/websocket"
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"syscall/js"
+
+	"nhooyr.io/websocket/internal/bpool"
+	"nhooyr.io/websocket/internal/wsjs"
+	"nhooyr.io/websocket/internal/xsync"
+)
+
+// Conn provides a wrapper around the browser WebSocket API.
+type Conn struct {
+	ws wsjs.WebSocket
+
+	// read limit for a message in bytes.
+	msgReadLimit xsync.Int64
+
+	closingMu     sync.Mutex
+	isReadClosed  xsync.Int64
+	closeOnce     sync.Once
+	closed        chan struct{}
+	closeErrOnce  sync.Once
+	closeErr      error
+	closeWasClean bool
+
+	releaseOnClose   func()
+	releaseOnMessage func()
+
+	readSignal chan struct{}
+	readBufMu  sync.Mutex
+	readBuf    []wsjs.MessageEvent
+}
+
+func (c *Conn) close(err error, wasClean bool) {
+	c.closeOnce.Do(func() {
+		runtime.SetFinalizer(c, nil)
+
+		if !wasClean {
+			err = fmt.Errorf("unclean connection close: %w", err)
+		}
+		c.setCloseErr(err)
+		c.closeWasClean = wasClean
+		close(c.closed)
+	})
+}
+
+func (c *Conn) init() {
+	c.closed = make(chan struct{})
+	c.readSignal = make(chan struct{}, 1)
+
+	c.msgReadLimit.Store(32768)
+
+	c.releaseOnClose = c.ws.OnClose(func(e wsjs.CloseEvent) {
+		err := CloseError{
+			Code:   StatusCode(e.Code),
+			Reason: e.Reason,
+		}
+		// We do not know if we sent or received this close as
+		// it's possible the browser triggered it without us
+		// explicitly sending it.
+		c.close(err, e.WasClean)
+
+		c.releaseOnClose()
+		c.releaseOnMessage()
+	})
+
+	c.releaseOnMessage = c.ws.OnMessage(func(e wsjs.MessageEvent) {
+		c.readBufMu.Lock()
+		defer c.readBufMu.Unlock()
+
+		c.readBuf = append(c.readBuf, e)
+
+		// Lets the read goroutine know there is definitely something in readBuf.
+		select {
+		case c.readSignal <- struct{}{}:
+		default:
+		}
+	})
+
+	runtime.SetFinalizer(c, func(c *Conn) {
+		c.setCloseErr(errors.New("connection garbage collected"))
+		c.closeWithInternal()
+	})
+}
+
+func (c *Conn) closeWithInternal() {
+	c.Close(StatusInternalError, "something went wrong")
+}
+
+// Read attempts to read a message from the connection.
+// The maximum time spent waiting is bounded by the context.
+func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) {
+	if c.isReadClosed.Load() == 1 {
+		return 0, nil, errors.New("WebSocket connection read closed")
+	}
+
+	typ, p, err := c.read(ctx)
+	if err != nil {
+		return 0, nil, fmt.Errorf("failed to read: %w", err)
+	}
+	if int64(len(p)) > c.msgReadLimit.Load() {
+		err := fmt.Errorf("read limited at %v bytes", c.msgReadLimit.Load())
+		c.Close(StatusMessageTooBig, err.Error())
+		return 0, nil, err
+	}
+	return typ, p, nil
+}
+
+func (c *Conn) read(ctx context.Context) (MessageType, []byte, error) {
+	select {
+	case <-ctx.Done():
+		c.Close(StatusPolicyViolation, "read timed out")
+		return 0, nil, ctx.Err()
+	case <-c.readSignal:
+	case <-c.closed:
+		return 0, nil, c.closeErr
+	}
+
+	c.readBufMu.Lock()
+	defer c.readBufMu.Unlock()
+
+	me := c.readBuf[0]
+	// We copy the messages forward and decrease the size
+	// of the slice to avoid reallocating.
+ copy(c.readBuf, c.readBuf[1:]) + c.readBuf = c.readBuf[:len(c.readBuf)-1] + + if len(c.readBuf) > 0 { + // Next time we read, we'll grab the message. + select { + case c.readSignal <- struct{}{}: + default: + } + } + + switch p := me.Data.(type) { + case string: + return MessageText, []byte(p), nil + case []byte: + return MessageBinary, p, nil + default: + panic("websocket: unexpected data type from wsjs OnMessage: " + reflect.TypeOf(me.Data).String()) + } +} + +// Ping is mocked out for Wasm. +func (c *Conn) Ping(ctx context.Context) error { + return nil +} + +// Write writes a message of the given type to the connection. +// Always non blocking. +func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error { + err := c.write(ctx, typ, p) + if err != nil { + // Have to ensure the WebSocket is closed after a write error + // to match the Go API. It can only error if the message type + // is unexpected or the passed bytes contain invalid UTF-8 for + // MessageText. + err := fmt.Errorf("failed to write: %w", err) + c.setCloseErr(err) + c.closeWithInternal() + return err + } + return nil +} + +func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) error { + if c.isClosed() { + return c.closeErr + } + switch typ { + case MessageBinary: + return c.ws.SendBytes(p) + case MessageText: + return c.ws.SendText(string(p)) + default: + return fmt.Errorf("unexpected message type: %v", typ) + } +} + +// Close closes the WebSocket with the given code and reason. +// It will wait until the peer responds with a close frame +// or the connection is closed. +// It thus performs the full WebSocket close handshake. +func (c *Conn) Close(code StatusCode, reason string) error { + err := c.exportedClose(code, reason) + if err != nil { + return fmt.Errorf("failed to close WebSocket: %w", err) + } + return nil +} + +func (c *Conn) exportedClose(code StatusCode, reason string) error { + c.closingMu.Lock() + defer c.closingMu.Unlock() + + ce := fmt.Errorf("sent close: %w", CloseError{ + Code: code, + Reason: reason, + }) + + if c.isClosed() { + return fmt.Errorf("tried to close with %q but connection already closed: %w", ce, c.closeErr) + } + + c.setCloseErr(ce) + err := c.ws.Close(int(code), reason) + if err != nil { + return err + } + + <-c.closed + if !c.closeWasClean { + return c.closeErr + } + return nil +} + +// Subprotocol returns the negotiated subprotocol. +// An empty string means the default protocol. +func (c *Conn) Subprotocol() string { + return c.ws.Subprotocol() +} + +// DialOptions represents the options available to pass to Dial. +type DialOptions struct { + // Subprotocols lists the subprotocols to negotiate with the server. + Subprotocols []string +} + +// Dial creates a new WebSocket connection to the given url with the given options. +// The passed context bounds the maximum time spent waiting for the connection to open. +// The returned *http.Response is always nil or a mock. It's only in the signature +// to match the core API. 
+func Dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) { + c, resp, err := dial(ctx, url, opts) + if err != nil { + return nil, nil, fmt.Errorf("failed to WebSocket dial %q: %w", url, err) + } + return c, resp, nil +} + +func dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) { + if opts == nil { + opts = &DialOptions{} + } + + url = strings.Replace(url, "http://", "ws://", 1) + url = strings.Replace(url, "https://", "wss://", 1) + + ws, err := wsjs.New(url, opts.Subprotocols) + if err != nil { + return nil, nil, err + } + + c := &Conn{ + ws: ws, + } + c.init() + + opench := make(chan struct{}) + releaseOpen := ws.OnOpen(func(e js.Value) { + close(opench) + }) + defer releaseOpen() + + select { + case <-ctx.Done(): + c.Close(StatusPolicyViolation, "dial timed out") + return nil, nil, ctx.Err() + case <-opench: + return c, &http.Response{ + StatusCode: http.StatusSwitchingProtocols, + }, nil + case <-c.closed: + return nil, nil, c.closeErr + } +} + +// Reader attempts to read a message from the connection. +// The maximum time spent waiting is bounded by the context. +func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) { + typ, p, err := c.Read(ctx) + if err != nil { + return 0, nil, err + } + return typ, bytes.NewReader(p), nil +} + +// Writer returns a writer to write a WebSocket data message to the connection. +// It buffers the entire message in memory and then sends it when the writer +// is closed. +func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) { + return writer{ + c: c, + ctx: ctx, + typ: typ, + b: bpool.Get(), + }, nil +} + +type writer struct { + closed bool + + c *Conn + ctx context.Context + typ MessageType + + b *bytes.Buffer +} + +func (w writer) Write(p []byte) (int, error) { + if w.closed { + return 0, errors.New("cannot write to closed writer") + } + n, err := w.b.Write(p) + if err != nil { + return n, fmt.Errorf("failed to write message: %w", err) + } + return n, nil +} + +func (w writer) Close() error { + if w.closed { + return errors.New("cannot close closed writer") + } + w.closed = true + defer bpool.Put(w.b) + + err := w.c.Write(w.ctx, w.typ, w.b.Bytes()) + if err != nil { + return fmt.Errorf("failed to close writer: %w", err) + } + return nil +} + +// CloseRead implements *Conn.CloseRead for wasm. +func (c *Conn) CloseRead(ctx context.Context) context.Context { + c.isReadClosed.Store(1) + + ctx, cancel := context.WithCancel(ctx) + go func() { + defer cancel() + c.read(ctx) + c.Close(StatusPolicyViolation, "unexpected data message") + }() + return ctx +} + +// SetReadLimit implements *Conn.SetReadLimit for wasm. +func (c *Conn) SetReadLimit(n int64) { + c.msgReadLimit.Store(n) +} + +func (c *Conn) setCloseErr(err error) { + c.closeErrOnce.Do(func() { + c.closeErr = fmt.Errorf("WebSocket closed: %w", err) + }) +} + +func (c *Conn) isClosed() bool { + select { + case <-c.closed: + return true + default: + return false + } +} diff --git a/vendor/nhooyr.io/websocket/wsjson/wsjson.go b/vendor/nhooyr.io/websocket/wsjson/wsjson.go index 2000a77af8..912dc7144e 100644 --- a/vendor/nhooyr.io/websocket/wsjson/wsjson.go +++ b/vendor/nhooyr.io/websocket/wsjson/wsjson.go @@ -1,67 +1,67 @@ -// Package wsjson provides helpers for reading and writing JSON messages. 
-package wsjson // import "nhooyr.io/websocket/wsjson" - -import ( - "context" - "encoding/json" - "fmt" - - "nhooyr.io/websocket" - "nhooyr.io/websocket/internal/bpool" - "nhooyr.io/websocket/internal/errd" -) - -// Read reads a JSON message from c into v. -// It will reuse buffers in between calls to avoid allocations. -func Read(ctx context.Context, c *websocket.Conn, v interface{}) error { - return read(ctx, c, v) -} - -func read(ctx context.Context, c *websocket.Conn, v interface{}) (err error) { - defer errd.Wrap(&err, "failed to read JSON message") - - _, r, err := c.Reader(ctx) - if err != nil { - return err - } - - b := bpool.Get() - defer bpool.Put(b) - - _, err = b.ReadFrom(r) - if err != nil { - return err - } - - err = json.Unmarshal(b.Bytes(), v) - if err != nil { - c.Close(websocket.StatusInvalidFramePayloadData, "failed to unmarshal JSON") - return fmt.Errorf("failed to unmarshal JSON: %w", err) - } - - return nil -} - -// Write writes the JSON message v to c. -// It will reuse buffers in between calls to avoid allocations. -func Write(ctx context.Context, c *websocket.Conn, v interface{}) error { - return write(ctx, c, v) -} - -func write(ctx context.Context, c *websocket.Conn, v interface{}) (err error) { - defer errd.Wrap(&err, "failed to write JSON message") - - w, err := c.Writer(ctx, websocket.MessageText) - if err != nil { - return err - } - - // json.Marshal cannot reuse buffers between calls as it has to return - // a copy of the byte slice but Encoder does as it directly writes to w. - err = json.NewEncoder(w).Encode(v) - if err != nil { - return fmt.Errorf("failed to marshal JSON: %w", err) - } - - return w.Close() -} +// Package wsjson provides helpers for reading and writing JSON messages. +package wsjson // import "nhooyr.io/websocket/wsjson" + +import ( + "context" + "encoding/json" + "fmt" + + "nhooyr.io/websocket" + "nhooyr.io/websocket/internal/bpool" + "nhooyr.io/websocket/internal/errd" +) + +// Read reads a JSON message from c into v. +// It will reuse buffers in between calls to avoid allocations. +func Read(ctx context.Context, c *websocket.Conn, v interface{}) error { + return read(ctx, c, v) +} + +func read(ctx context.Context, c *websocket.Conn, v interface{}) (err error) { + defer errd.Wrap(&err, "failed to read JSON message") + + _, r, err := c.Reader(ctx) + if err != nil { + return err + } + + b := bpool.Get() + defer bpool.Put(b) + + _, err = b.ReadFrom(r) + if err != nil { + return err + } + + err = json.Unmarshal(b.Bytes(), v) + if err != nil { + c.Close(websocket.StatusInvalidFramePayloadData, "failed to unmarshal JSON") + return fmt.Errorf("failed to unmarshal JSON: %w", err) + } + + return nil +} + +// Write writes the JSON message v to c. +// It will reuse buffers in between calls to avoid allocations. +func Write(ctx context.Context, c *websocket.Conn, v interface{}) error { + return write(ctx, c, v) +} + +func write(ctx context.Context, c *websocket.Conn, v interface{}) (err error) { + defer errd.Wrap(&err, "failed to write JSON message") + + w, err := c.Writer(ctx, websocket.MessageText) + if err != nil { + return err + } + + // json.Marshal cannot reuse buffers between calls as it has to return + // a copy of the byte slice but Encoder does as it directly writes to w. + err = json.NewEncoder(w).Encode(v) + if err != nil { + return fmt.Errorf("failed to marshal JSON: %w", err) + } + + return w.Close() +}
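For reference, below is a minimal sketch of how application code typically consumes the vendored nhooyr.io/websocket and wsjson packages re-added above. The endpoint URL and the note struct are hypothetical placeholders for this sketch; only the Dial, SetReadLimit, Close, wsjson.Read, and wsjson.Write signatures come from the vendored sources in this diff.

package main

import (
	"context"
	"log"
	"time"

	"nhooyr.io/websocket"
	"nhooyr.io/websocket/wsjson"
)

// note is a hypothetical message shape used only for this sketch.
type note struct {
	ID   string `json:"id"`
	Body string `json:"body"`
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Dial performs the WebSocket handshake; the *http.Response it
	// returns is ignored here.
	c, _, err := websocket.Dial(ctx, "ws://localhost:8080/ws", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close(websocket.StatusInternalError, "deferred close")

	// Raise the default 32768-byte per-message read limit documented above.
	c.SetReadLimit(1 << 20)

	// wsjson.Write marshals v and sends it as a single text message.
	if err := wsjson.Write(ctx, c, note{ID: "1", Body: "hello"}); err != nil {
		log.Fatal(err)
	}

	// wsjson.Read waits for the next message and unmarshals it into v.
	var reply note
	if err := wsjson.Read(ctx, c, &reply); err != nil {
		log.Fatal(err)
	}
	log.Printf("got note %s: %s", reply.ID, reply.Body)

	c.Close(websocket.StatusNormalClosure, "done")
}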